// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This driver provides the core support for a single RMI4-based device.
 *
 * The RMI4 specification can be found here (URL split for line length):
 *
 * http://www.synaptics.com/sites/default/files/
 * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
#include "rmi_bus.h"
#include "rmi_driver.h"

#define HAS_NONSTANDARD_PDT_MASK	0x40
#define RMI4_MAX_PAGE			0xff
#define RMI4_PAGE_SIZE			0x100
#define RMI4_PAGE_MASK			0xFF00

#define RMI_DEVICE_RESET_CMD		0x01
#define DEFAULT_RESET_DELAY_MS		100

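/**
 * rmi_free_function_list - remove all functions from the RMI device
 *
 * @rmi_dev: Pointer to an RMI device
 *
 * Unregisters every function on the device's function list (in reverse
 * order, so F01 is removed last) and releases the shared IRQ bookkeeping
 * memory.
 */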
void rmi_free_function_list(struct rmi_device *rmi_dev)
{
	struct rmi_function *fn, *tmp;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");

	/* Doing it in the reverse order so F01 will be removed last */
	list_for_each_entry_safe_reverse(fn, tmp,
					 &data->function_list, node) {
		list_del(&fn->node);
		rmi_unregister_function(fn);
	}

	devm_kfree(&rmi_dev->dev, data->irq_memory);
	data->irq_memory = NULL;
	data->irq_status = NULL;
	data->fn_irq_bits = NULL;
	data->current_irq_mask = NULL;
	data->new_irq_mask = NULL;

	data->f01_container = NULL;
	data->f34_container = NULL;
}

static int reset_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->reset) {
		retval = fh->reset(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Reset failed with code %d.\n",
				retval);
	}

	return retval;
}

static int configure_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->config) {
		retval = fh->config(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Config failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = reset_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = configure_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;
	int i;
	int error;

	if (!data)
		return 0;

	if (!data->attn_data.data) {
		error = rmi_read_block(rmi_dev,
				data->f01_container->fd.data_base_addr + 1,
				data->irq_status, data->num_of_irq_regs);
		if (error < 0) {
			dev_err(dev, "Failed to read irqs, code=%d\n", error);
			return error;
		}
	}

	mutex_lock(&data->irq_mutex);
	bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
		   data->irq_count);
	/*
	 * At this point, irq_status has all bits that are set in the
	 * interrupt status register and are enabled.
	 */
	mutex_unlock(&data->irq_mutex);

	for_each_set_bit(i, data->irq_status, data->irq_count)
		handle_nested_irq(irq_find_mapping(data->irqdomain, i));

	if (data->input)
		input_sync(data->input);

	return 0;
}

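/**
 * rmi_set_attn_data - queue attention data reported by the transport driver
 *
 * @rmi_dev: Pointer to an RMI device
 * @irq_status: Interrupt status bits that accompany the attention report
 * @data: Attention payload to be copied and queued
 * @size: Size of the attention payload in bytes
 *
 * Copies the payload and pushes it onto the driver's attention FIFO so the
 * IRQ thread can process it. Reports are dropped while the device is
 * disabled or if memory for the copy cannot be allocated.
 */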
void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
		       void *data, size_t size)
{
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data;
	void *fifo_data;

	if (!drvdata->enabled)
		return;

	fifo_data = kmemdup(data, size, GFP_ATOMIC);
	if (!fifo_data)
		return;

	attn_data.irq_status = irq_status;
	attn_data.size = size;
	attn_data.data = fifo_data;

	kfifo_put(&drvdata->attn_fifo, attn_data);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);

static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
{
	struct rmi_device *rmi_dev = dev_id;
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int ret, count;

	count = kfifo_get(&drvdata->attn_fifo, &attn_data);
	if (count) {
		*(drvdata->irq_status) = attn_data.irq_status;
		drvdata->attn_data = attn_data;
	}

	ret = rmi_process_interrupt_requests(rmi_dev);
	if (ret)
		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
			"Failed to process interrupt request: %d\n", ret);

	if (count) {
		kfree(attn_data.data);
		drvdata->attn_data.data = NULL;
	}

	if (!kfifo_is_empty(&drvdata->attn_fifo))
		return rmi_irq_fn(irq, dev_id);

	return IRQ_HANDLED;
}

static int rmi_irq_init(struct rmi_device *rmi_dev)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq_flags = irq_get_trigger_type(pdata->irq);
	int ret;

	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
					rmi_irq_fn, irq_flags | IRQF_ONESHOT,
					dev_driver_string(rmi_dev->xport->dev),
					rmi_dev);
	if (ret < 0) {
		dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
			pdata->irq);

		return ret;
	}

	data->enabled = true;

	return 0;
}

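/**
 * rmi_find_function - look up a function on the device by number
 *
 * @rmi_dev: Pointer to an RMI device
 * @number: RMI function number to search for (e.g. 0x01 for F01)
 *
 * Returns the matching rmi_function from the device's function list, or
 * NULL if the function is not present.
 */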
struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;

	list_for_each_entry(entry, &data->function_list, node) {
		if (entry->fd.function_number == number)
			return entry;
	}

	return NULL;
}

static int suspend_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->suspend) {
		retval = fh->suspend(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Suspend failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_suspend_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = suspend_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int resume_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->resume) {
		retval = fh->resume(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Resume failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_resume_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = resume_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

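/**
 * rmi_enable_sensor - configure the sensor and service pending interrupts
 *
 * @rmi_dev: Pointer to an RMI device
 *
 * Runs the config callback of every bound function and then processes any
 * interrupt requests that are already pending.
 */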
int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
	int retval = 0;

	retval = rmi_driver_process_config_requests(rmi_dev);
	if (retval < 0)
		return retval;

	return rmi_process_interrupt_requests(rmi_dev);
}

/**
 * rmi_driver_set_input_params - set input device id and other data.
 *
 * @rmi_dev: Pointer to an RMI device
 * @input: Pointer to input device
 *
 */
static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
				       struct input_dev *input)
{
	input->name = SYNAPTICS_INPUT_DEVICE_NAME;
	input->id.vendor = SYNAPTICS_VENDOR_ID;
	input->id.bustype = BUS_RMI;
	return 0;
}

static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
				      struct input_dev *input)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	const char *device_name = rmi_f01_get_product_ID(data->f01_container);
	char *name;

	name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
			      "Synaptics %s", device_name);
	if (!name)
		return;

	input->name = name;
}

static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
				   unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_or(data->new_irq_mask,
		  data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

	bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}

static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
				     unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_andnot(data->fn_irq_bits,
		      data->fn_irq_bits, mask, data->irq_count);
	bitmap_andnot(data->new_irq_mask,
		      data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}

static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int error;

	/*
	 * Can get called before the driver is fully ready to deal with
	 * this situation.
	 */
	if (!data || !data->f01_container) {
		dev_warn(&rmi_dev->dev,
			 "Not ready to handle reset yet!\n");
		return 0;
	}

	error = rmi_read_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->current_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		return error;
	}

	error = rmi_driver_process_reset_requests(rmi_dev);
	if (error < 0)
		return error;

	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return 0;
}

static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
			      struct pdt_entry *entry, u16 pdt_address)
{
	u8 buf[RMI_PDT_ENTRY_SIZE];
	int error;

	error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
	if (error) {
		dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
			pdt_address, error);
		return error;
	}

	entry->page_start = pdt_address & RMI4_PAGE_MASK;
	entry->query_base_addr = buf[0];
	entry->command_base_addr = buf[1];
	entry->control_base_addr = buf[2];
	entry->data_base_addr = buf[3];
	entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
	entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
	entry->function_number = buf[5];

	return 0;
}

static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
				      struct rmi_function_descriptor *fd)
{
	fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
	fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
	fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
	fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
	fd->function_number = pdt->function_number;
	fd->interrupt_source_count = pdt->interrupt_source_count;
	fd->function_version = pdt->function_version;
}

#define RMI_SCAN_CONTINUE	0
#define RMI_SCAN_DONE		1

static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
			     int page,
			     int *empty_pages,
			     void *ctx,
			     int (*callback)(struct rmi_device *rmi_dev,
					     void *ctx,
					     const struct pdt_entry *entry))
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct pdt_entry pdt_entry;
	u16 page_start = RMI4_PAGE_SIZE * page;
	u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
	u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
	u16 addr;
	int error;
	int retval;

	for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
		error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
		if (error)
			return error;

		if (RMI4_END_OF_PDT(pdt_entry.function_number))
			break;

		retval = callback(rmi_dev, ctx, &pdt_entry);
		if (retval != RMI_SCAN_CONTINUE)
			return retval;
	}

	/*
	 * Count number of empty PDT pages. If a gap of two pages
	 * or more is found, stop scanning.
	 */
	if (addr == pdt_start)
		++*empty_pages;
	else
		*empty_pages = 0;

	return (data->bootloader_mode || *empty_pages >= 2) ?
					RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}

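/**
 * rmi_scan_pdt - walk the Page Description Table
 *
 * @rmi_dev: Pointer to an RMI device
 * @ctx: Opaque context passed through to the callback
 * @callback: Invoked for every PDT entry found
 *
 * Scanning stops when the callback returns anything other than
 * RMI_SCAN_CONTINUE, when the device is in bootloader mode, when two
 * consecutive empty pages are seen, or when the last page is reached.
 * Returns 0 on success or a negative error code.
 */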
int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
		 int (*callback)(struct rmi_device *rmi_dev,
				 void *ctx, const struct pdt_entry *entry))
{
	int page;
	int empty_pages = 0;
	int retval = RMI_SCAN_DONE;

	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
		retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
					   ctx, callback);
		if (retval != RMI_SCAN_CONTINUE)
			break;
	}

	return retval < 0 ? retval : 0;
}

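/**
 * rmi_read_register_desc - read and parse a register descriptor
 *
 * @d: Pointer to an RMI device
 * @addr: Address of the register descriptor to read
 * @rdesc: Descriptor structure to fill in
 *
 * Reads the presence register to determine which packet registers exist,
 * then reads the register structure to record the size and subpacket map
 * of each one. Returns 0 on success or a negative error code.
 */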
int rmi_read_register_desc(struct rmi_device *d, u16 addr,
			   struct rmi_register_descriptor *rdesc)
{
	int ret;
	u8 size_presence_reg;
	u8 buf[35];
	int presense_offset = 1;
	u8 *struct_buf;
	int reg;
	int offset = 0;
	int map_offset = 0;
	int i;
	int b;

	/*
	 * The first register of the register descriptor is the size of
	 * the register descriptor's presence register.
	 */
	ret = rmi_read(d, addr, &size_presence_reg);
	if (ret)
		return ret;
	++addr;

	if (size_presence_reg < 0 || size_presence_reg > 35)
		return -EIO;

	memset(buf, 0, sizeof(buf));

	/*
	 * The presence register contains the size of the register structure
	 * and a bitmap which identifies which packet registers are present
	 * for this particular register type (i.e. query, control, or data).
	 */
	ret = rmi_read_block(d, addr, buf, size_presence_reg);
	if (ret)
		return ret;
	++addr;

	if (buf[0] == 0) {
		presense_offset = 3;
		rdesc->struct_size = buf[1] | (buf[2] << 8);
	} else {
		rdesc->struct_size = buf[0];
	}

	for (i = presense_offset; i < size_presence_reg; i++) {
		for (b = 0; b < 8; b++) {
			if (buf[i] & (0x1 << b))
				bitmap_set(rdesc->presense_map, map_offset, 1);
			++map_offset;
		}
	}

	rdesc->num_registers = bitmap_weight(rdesc->presense_map,
						RMI_REG_DESC_PRESENSE_BITS);

	rdesc->registers = devm_kcalloc(&d->dev,
					rdesc->num_registers,
					sizeof(struct rmi_register_desc_item),
					GFP_KERNEL);
	if (!rdesc->registers)
		return -ENOMEM;

	/*
	 * Allocate a temporary buffer to hold the register structure.
	 * kzalloc is used instead of devm_kzalloc because the buffer
	 * does not need to be retained after this function returns.
	 */
	struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
	if (!struct_buf)
		return -ENOMEM;

	/*
	 * The register structure contains information about every packet
	 * register of this type. This includes the size of the packet
	 * register and a bitmap of all subpackets contained in the packet
	 * register.
	 */
	ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
	if (ret)
		goto free_struct_buff;

	reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
	for (i = 0; i < rdesc->num_registers; i++) {
		struct rmi_register_desc_item *item = &rdesc->registers[i];
		int reg_size = struct_buf[offset];

		++offset;
		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
					(struct_buf[offset + 1] << 8);
			offset += 2;
		}

		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
					(struct_buf[offset + 1] << 8) |
					(struct_buf[offset + 2] << 16) |
					(struct_buf[offset + 3] << 24);
			offset += 4;
		}

		item->reg = reg;
		item->reg_size = reg_size;

		map_offset = 0;

		do {
			for (b = 0; b < 7; b++) {
				if (struct_buf[offset] & (0x1 << b))
					bitmap_set(item->subpacket_map,
						   map_offset, 1);
				++map_offset;
			}
		} while (struct_buf[offset++] & 0x80);

		item->num_subpackets = bitmap_weight(item->subpacket_map,
						RMI_REG_DESC_SUBPACKET_BITS);

		rmi_dbg(RMI_DEBUG_CORE, &d->dev,
			"%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
			item->reg, item->reg_size, item->num_subpackets);

		reg = find_next_bit(rdesc->presense_map,
				RMI_REG_DESC_PRESENSE_BITS, reg + 1);
	}

free_struct_buff:
	kfree(struct_buf);
	return ret;
}

const struct rmi_register_desc_item *rmi_get_register_desc_item(
				struct rmi_register_descriptor *rdesc, u16 reg)
{
	const struct rmi_register_desc_item *item;
	int i;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		if (item->reg == reg)
			return item;
	}

	return NULL;
}

size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
	const struct rmi_register_desc_item *item;
	int i;
	size_t size = 0;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		size += item->reg_size;
	}
	return size;
}

/* Compute the register offset relative to the base address */
int rmi_register_desc_calc_reg_offset(
		struct rmi_register_descriptor *rdesc, u16 reg)
{
	const struct rmi_register_desc_item *item;
	int offset = 0;
	int i;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		if (item->reg == reg)
			return offset;
		++offset;
	}
	return -1;
}

bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
				     u8 subpacket)
{
	return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
			     subpacket) == subpacket;
}

static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
				     const struct pdt_entry *pdt)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int ret;
	u8 status;

	if (pdt->function_number == 0x34 && pdt->function_version > 1) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F34 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(7))
			data->bootloader_mode = true;
	} else if (pdt->function_number == 0x01) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F01 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(6))
			data->bootloader_mode = true;
	}

	return 0;
}

static int rmi_count_irqs(struct rmi_device *rmi_dev,
			  void *ctx, const struct pdt_entry *pdt)
{
	int *irq_count = ctx;
	int ret;

	*irq_count += pdt->interrupt_source_count;

	ret = rmi_check_bootloader_mode(rmi_dev, pdt);
	if (ret < 0)
		return ret;

	return RMI_SCAN_CONTINUE;
}

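/**
 * rmi_initial_reset - PDT scan callback that issues the initial device reset
 *
 * @rmi_dev: Pointer to an RMI device
 * @ctx: Unused scan context
 * @pdt: PDT entry currently being examined
 *
 * When the F01 entry is found, the device is reset either through the
 * transport's reset hook or by writing the reset command to F01, and the
 * scan is stopped. Scanning past page 0 without finding F01 is treated as
 * an error.
 */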
int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
		      const struct pdt_entry *pdt)
{
	int error;

	if (pdt->function_number == 0x01) {
		u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
		u8 cmd_buf = RMI_DEVICE_RESET_CMD;
		const struct rmi_device_platform_data *pdata =
				rmi_get_platform_data(rmi_dev);

		if (rmi_dev->xport->ops->reset) {
			error = rmi_dev->xport->ops->reset(rmi_dev->xport,
							   cmd_addr);
			if (error)
				return error;

			return RMI_SCAN_DONE;
		}

		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
		error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
		if (error) {
			dev_err(&rmi_dev->dev,
				"Initial reset failed. Code = %d.\n", error);
			return error;
		}

		mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);

		return RMI_SCAN_DONE;
	}

	/* F01 should always be on page 0. If we don't find it there, fail. */
	return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
}

static int rmi_create_function(struct rmi_device *rmi_dev,
			       void *ctx, const struct pdt_entry *pdt)
{
	struct device *dev = &rmi_dev->dev;
	struct rmi_driver_data *data = dev_get_drvdata(dev);
	int *current_irq_count = ctx;
	struct rmi_function *fn;
	int i;
	int error;

	rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
		pdt->function_number);

	fn = kzalloc(sizeof(struct rmi_function) +
			BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
		     GFP_KERNEL);
	if (!fn) {
		dev_err(dev, "Failed to allocate memory for F%02X\n",
			pdt->function_number);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&fn->node);
	rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);

	fn->rmi_dev = rmi_dev;

	fn->num_of_irqs = pdt->interrupt_source_count;
	fn->irq_pos = *current_irq_count;
	*current_irq_count += fn->num_of_irqs;

	for (i = 0; i < fn->num_of_irqs; i++)
		set_bit(fn->irq_pos + i, fn->irq_mask);

	error = rmi_register_function(fn);
	if (error)
		return error;

	if (pdt->function_number == 0x01)
		data->f01_container = fn;
	else if (pdt->function_number == 0x34)
		data->f34_container = fn;

	list_add_tail(&fn->node, &data->function_list);

	return RMI_SCAN_CONTINUE;
}

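/**
 * rmi_enable_irq - re-enable the attention interrupt
 *
 * @rmi_dev: Pointer to an RMI device
 * @clear_wake: If true, also disable the IRQ as a wakeup source
 *
 * Re-enables the interrupt line and, on edge-triggered systems, processes
 * any interrupt requests that may have arrived while the line was disabled.
 */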
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq = pdata->irq;
	int irq_flags;
	int retval;

	mutex_lock(&data->enabled_mutex);

	if (data->enabled)
		goto out;

	enable_irq(irq);
	data->enabled = true;
	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = disable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to disable irq for wake: %d\n",
				 retval);
	}

	/*
	 * Call rmi_process_interrupt_requests() after enabling the irq,
	 * otherwise we may lose an interrupt on edge-triggered systems.
	 */
	irq_flags = irq_get_trigger_type(pdata->irq);
	if (irq_flags & IRQ_TYPE_EDGE_BOTH)
		rmi_process_interrupt_requests(rmi_dev);

out:
	mutex_unlock(&data->enabled_mutex);
}

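/**
 * rmi_disable_irq - disable the attention interrupt
 *
 * @rmi_dev: Pointer to an RMI device
 * @enable_wake: If true, also enable the IRQ as a wakeup source
 *
 * Disables the interrupt line and drains any attention reports still
 * queued in the attention FIFO.
 */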
void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int irq = pdata->irq;
	int retval, count;

	mutex_lock(&data->enabled_mutex);

	if (!data->enabled)
		goto out;

	data->enabled = false;
	disable_irq(irq);
	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = enable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to enable irq for wake: %d\n",
				 retval);
	}

	/* make sure the fifo is clean */
	while (!kfifo_is_empty(&data->attn_fifo)) {
		count = kfifo_get(&data->attn_fifo, &attn_data);
		if (count)
			kfree(attn_data.data);
	}

out:
	mutex_unlock(&data->enabled_mutex);
}

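/**
 * rmi_driver_suspend - suspend the RMI device
 *
 * @rmi_dev: Pointer to an RMI device
 * @enable_wake: If true, keep the IRQ armed as a wakeup source
 *
 * Suspends every function on the device and then disables the interrupt.
 */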
int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
{
	int retval;

	retval = rmi_suspend_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
			 retval);

	rmi_disable_irq(rmi_dev, enable_wake);
	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);

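/**
 * rmi_driver_resume - resume the RMI device
 *
 * @rmi_dev: Pointer to an RMI device
 * @clear_wake: If true, disarm the IRQ as a wakeup source
 *
 * Re-enables the interrupt and then resumes every function on the device.
 */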
int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
	int retval;

	rmi_enable_irq(rmi_dev, clear_wake);

	retval = rmi_resume_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
			 retval);

	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_resume);

static int rmi_driver_remove(struct device *dev)
{
	struct rmi_device *rmi_dev = to_rmi_device(dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_disable_irq(rmi_dev, false);

	rmi_f34_remove_sysfs(rmi_dev);
	rmi_free_function_list(rmi_dev);

	irq_domain_remove(data->irqdomain);
	data->irqdomain = NULL;

	return 0;
}

#ifdef CONFIG_OF
static int rmi_driver_of_probe(struct device *dev,
			       struct rmi_device_platform_data *pdata)
{
	int retval;

	retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
					  "syna,reset-delay-ms", 1);
	if (retval)
		return retval;

	return 0;
}
#else
static inline int rmi_driver_of_probe(struct device *dev,
				      struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

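/**
 * rmi_probe_interrupts - count IRQ sources and set up IRQ bookkeeping
 *
 * @data: Driver data for the RMI device
 *
 * Scans the PDT to count interrupt sources (and detect bootloader mode),
 * creates a linear IRQ domain for them, and allocates the four bitmaps used
 * to track interrupt status and the enable masks.
 */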
int rmi_probe_interrupts(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
	int irq_count = 0;
	size_t size;
	int retval;

	/*
	 * We need to count the IRQs and allocate their storage before scanning
	 * the PDT and creating the function entries, because adding a new
	 * function can trigger events that result in the IRQ related storage
	 * being accessed.
	 */
	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
	data->bootloader_mode = false;

	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
	if (retval < 0) {
		dev_err(dev, "IRQ counting failed with code %d.\n", retval);
		return retval;
	}

	if (data->bootloader_mode)
		dev_warn(dev, "Device in bootloader mode.\n");

	/* Allocate and register a linear revmap irq_domain */
	data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
						   &irq_domain_simple_ops,
						   data);
	if (!data->irqdomain) {
		dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	data->irq_count = irq_count;
	data->num_of_irq_regs = (data->irq_count + 7) / 8;

	size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
	data->irq_memory = devm_kcalloc(dev, size, 4, GFP_KERNEL);
	if (!data->irq_memory) {
		dev_err(dev, "Failed to allocate memory for irq masks.\n");
		return -ENOMEM;
	}

	data->irq_status	= data->irq_memory + size * 0;
	data->fn_irq_bits	= data->irq_memory + size * 1;
	data->current_irq_mask	= data->irq_memory + size * 2;
	data->new_irq_mask	= data->irq_memory + size * 3;

	return retval;
}

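/**
 * rmi_init_functions - create function devices from the PDT
 *
 * @data: Driver data for the RMI device
 *
 * Scans the PDT, registering an rmi_function for every entry found, and
 * then reads the currently enabled interrupt mask from F01. The function
 * list is torn down again on failure.
 */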
int rmi_init_functions(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	int irq_count = 0;
	int retval;

	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
	if (retval < 0) {
		dev_err(dev, "Function creation failed with code %d.\n",
			retval);
		goto err_destroy_functions;
	}

	if (!data->f01_container) {
		dev_err(dev, "Missing F01 container!\n");
		retval = -EINVAL;
		goto err_destroy_functions;
	}

	retval = rmi_read_block(rmi_dev,
				data->f01_container->fd.control_base_addr + 1,
				data->current_irq_mask, data->num_of_irq_regs);
	if (retval < 0) {
		dev_err(dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		goto err_destroy_functions;
	}

	return 0;

err_destroy_functions:
	rmi_free_function_list(rmi_dev);
	return retval;
}

static int rmi_driver_probe(struct device *dev)
{
	struct rmi_driver *rmi_driver;
	struct rmi_driver_data *data;
	struct rmi_device_platform_data *pdata;
	struct rmi_device *rmi_dev;
	int retval;

	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
		__func__);

	if (!rmi_is_physical_device(dev)) {
		rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
		return -ENODEV;
	}

	rmi_dev = to_rmi_device(dev);
	rmi_driver = to_rmi_driver(dev->driver);
	rmi_dev->driver = rmi_driver;

	pdata = rmi_get_platform_data(rmi_dev);

	if (rmi_dev->xport->dev->of_node) {
		retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
		if (retval)
			return retval;
	}

	data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->function_list);
	data->rmi_dev = rmi_dev;
	dev_set_drvdata(&rmi_dev->dev, data);

	/*
	 * Right before a warm boot, the sensor might be in some unusual state,
	 * such as F54 diagnostics, or F34 bootloader mode after a firmware
	 * or configuration update. In order to clear the sensor to a known
	 * state and/or apply any updates, we issue an initial reset to clear
	 * any previous settings and force it into normal operation.
	 *
	 * We have to do this before actually building the PDT because
	 * the reflash updates (if any) might cause various registers to move
	 * around.
	 *
	 * For a number of reasons, this initial reset may fail to return
	 * within the specified time, but we'll still be able to bring up the
	 * driver normally after that failure. This occurs most commonly in
	 * a cold boot situation (where the firmware takes longer to come up
	 * than from a warm boot) when the reset_delay_ms in the platform data
	 * has been set too short to accommodate that. Since the sensor will
	 * eventually come up and be usable, we don't want to just fail here
	 * and leave the customer's device unusable. So we warn them, and
	 * continue processing.
	 */
	retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
	if (retval < 0)
		dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");

	retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
	if (retval < 0) {
		/*
		 * Print a warning and continue, since failure to read the
		 * PDT properties is not fatal.
		 */
		dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
			 PDT_PROPERTIES_LOCATION, retval);
	}

	mutex_init(&data->irq_mutex);
	mutex_init(&data->enabled_mutex);

	retval = rmi_probe_interrupts(data);
	if (retval)
		goto err;

	if (rmi_dev->xport->input) {
		/*
		 * The transport driver already has an input device.
		 * In some cases it is preferable to reuse the transport
		 * device's input device instead of creating a new one here.
		 * One example is HID touchpads that report "pass-through"
		 * button events which are not available through RMI
		 * registers.
		 */
		data->input = rmi_dev->xport->input;
	} else {
		data->input = devm_input_allocate_device(dev);
		if (!data->input) {
			dev_err(dev, "%s: Failed to allocate input device.\n",
				__func__);
			retval = -ENOMEM;
			goto err;
		}
		rmi_driver_set_input_params(rmi_dev, data->input);
		data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
						   "%s/input0", dev_name(dev));
		if (!data->input->phys) {
			retval = -ENOMEM;
			goto err;
		}
	}

	retval = rmi_init_functions(data);
	if (retval)
		goto err;

	retval = rmi_f34_create_sysfs(rmi_dev);
	if (retval)
		goto err;

	if (data->input) {
		rmi_driver_set_input_name(rmi_dev, data->input);
		if (!rmi_dev->xport->input) {
			retval = input_register_device(data->input);
			if (retval) {
				dev_err(dev, "%s: Failed to register input device.\n",
					__func__);
				goto err_destroy_functions;
			}
		}
	}

	retval = rmi_irq_init(rmi_dev);
	if (retval < 0)
		goto err_destroy_functions;

	if (data->f01_container->dev.driver) {
		/* Driver already bound, so enable ATTN now. */
		retval = rmi_enable_sensor(rmi_dev);
		if (retval)
			goto err_disable_irq;
	}

	return 0;

err_disable_irq:
	rmi_disable_irq(rmi_dev, false);
err_destroy_functions:
	rmi_free_function_list(rmi_dev);
err:
	return retval;
}

static struct rmi_driver rmi_physical_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "rmi4_physical",
		.bus	= &rmi_bus_type,
		.probe	= rmi_driver_probe,
		.remove	= rmi_driver_remove,
	},
	.reset_handler = rmi_driver_reset_handler,
	.clear_irq_bits = rmi_driver_clear_irq_bits,
	.set_irq_bits = rmi_driver_set_irq_bits,
	.set_input_params = rmi_driver_set_input_params,
};

bool rmi_is_physical_driver(const struct device_driver *drv)
{
	return drv == &rmi_physical_driver.driver;
}

int __init rmi_register_physical_driver(void)
{
	int error;

	error = driver_register(&rmi_physical_driver.driver);
	if (error) {
		pr_err("%s: driver register failed, code=%d.\n", __func__,
		       error);
		return error;
	}

	return 0;
}

void __exit rmi_unregister_physical_driver(void)
{
	driver_unregister(&rmi_physical_driver.driver);
}
