1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2022 ROHM Semiconductors
4 *
5 * ROHM/KIONIX accelerometer driver
6 */
7
8 #include <linux/delay.h>
9 #include <linux/device.h>
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/mutex.h>
14 #include <linux/property.h>
15 #include <linux/regmap.h>
16 #include <linux/regulator/consumer.h>
17 #include <linux/slab.h>
18 #include <linux/string_choices.h>
19 #include <linux/types.h>
20 #include <linux/units.h>
21
22 #include <linux/iio/iio.h>
23 #include <linux/iio/sysfs.h>
24 #include <linux/iio/trigger.h>
25 #include <linux/iio/trigger_consumer.h>
26 #include <linux/iio/triggered_buffer.h>
27
28 #include "kionix-kx022a.h"
29
30 /*
31 * The KX022A has a FIFO which can store 43 samples of HiRes data from 3
32 * channels. This equals 43 (samples) * 3 (channels) * 2 (bytes/sample) =
33 * 258 bytes of sample data. The quirk to know is that the number of bytes in
34 * the FIFO is advertised via an 8-bit register (max value 255), and the full
35 * 258 bytes of data is indicated using that max value 255.
36 */
37 #define KX022A_FIFO_LENGTH 43
38 #define KX022A_FIFO_FULL_VALUE 255
39 #define KX022A_SOFT_RESET_WAIT_TIME_US (5 * USEC_PER_MSEC)
40 #define KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US (500 * USEC_PER_MSEC)
41
42 /* 3 axes, 2 bytes of data for each axis */
43 #define KX022A_FIFO_SAMPLES_SIZE_BYTES 6
44 #define KX022A_FIFO_MAX_BYTES \
45 (KX022A_FIFO_LENGTH * KX022A_FIFO_SAMPLES_SIZE_BYTES)
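/*
 * Worked example of the FIFO byte accounting described above (numbers derived
 * from the definitions in this file, shown purely for illustration):
 *
 *   KX022A_FIFO_MAX_BYTES = 43 samples * 6 bytes/sample = 258 bytes
 *
 * BUF_STATUS_1 is an 8-bit register, so it saturates at 255. The driver maps
 * that saturated value back to the real maximum:
 *
 *   reported == 255 (KX022A_FIFO_FULL_VALUE) => assume 258 bytes buffered
 *   reported <  255                          => use the value as-is
 *
 * See kx022a_get_fifo_bytes_available() below for the actual handling.
 */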
46
47 enum {
48 KX022A_STATE_SAMPLE,
49 KX022A_STATE_FIFO,
50 };
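/*
 * Note: these values are used as flag bits in kx022a_data::state.
 * KX022A_STATE_SAMPLE (0) means no flags are set, while KX022A_STATE_FIFO
 * (bit 0) is set/cleared when the hardware FIFO is enabled/disabled (see
 * kx022a_fifo_enable() and kx022a_fifo_disable()).
 */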
51
52 /* kx022a Regmap configs */
53 static const struct regmap_range kx022a_volatile_ranges[] = {
54 {
55 .range_min = KX022A_REG_XHP_L,
56 .range_max = KX022A_REG_COTR,
57 }, {
58 .range_min = KX022A_REG_TSCP,
59 .range_max = KX022A_REG_INT_REL,
60 }, {
61 /* The reset bit will be cleared by sensor */
62 .range_min = KX022A_REG_CNTL2,
63 .range_max = KX022A_REG_CNTL2,
64 }, {
65 .range_min = KX022A_REG_BUF_STATUS_1,
66 .range_max = KX022A_REG_BUF_READ,
67 },
68 };
69
70 static const struct regmap_access_table kx022a_volatile_regs = {
71 .yes_ranges = &kx022a_volatile_ranges[0],
72 .n_yes_ranges = ARRAY_SIZE(kx022a_volatile_ranges),
73 };
74
75 static const struct regmap_range kx022a_precious_ranges[] = {
76 {
77 .range_min = KX022A_REG_INT_REL,
78 .range_max = KX022A_REG_INT_REL,
79 },
80 };
81
82 static const struct regmap_access_table kx022a_precious_regs = {
83 .yes_ranges = &kx022a_precious_ranges[0],
84 .n_yes_ranges = ARRAY_SIZE(kx022a_precious_ranges),
85 };
86
87 /*
88 * The HW does not mark the WHO_AM_I register as read-only, but we don't want
89 * to write it, so we still include it in the read-only ranges.
90 */
91 static const struct regmap_range kx022a_read_only_ranges[] = {
92 {
93 .range_min = KX022A_REG_XHP_L,
94 .range_max = KX022A_REG_INT_REL,
95 }, {
96 .range_min = KX022A_REG_BUF_STATUS_1,
97 .range_max = KX022A_REG_BUF_STATUS_2,
98 }, {
99 .range_min = KX022A_REG_BUF_READ,
100 .range_max = KX022A_REG_BUF_READ,
101 },
102 };
103
104 static const struct regmap_access_table kx022a_ro_regs = {
105 .no_ranges = &kx022a_read_only_ranges[0],
106 .n_no_ranges = ARRAY_SIZE(kx022a_read_only_ranges),
107 };
108
109 static const struct regmap_range kx022a_write_only_ranges[] = {
110 {
111 .range_min = KX022A_REG_BTS_WUF_TH,
112 .range_max = KX022A_REG_BTS_WUF_TH,
113 }, {
114 .range_min = KX022A_REG_MAN_WAKE,
115 .range_max = KX022A_REG_MAN_WAKE,
116 }, {
117 .range_min = KX022A_REG_SELF_TEST,
118 .range_max = KX022A_REG_SELF_TEST,
119 }, {
120 .range_min = KX022A_REG_BUF_CLEAR,
121 .range_max = KX022A_REG_BUF_CLEAR,
122 },
123 };
124
125 static const struct regmap_access_table kx022a_wo_regs = {
126 .no_ranges = &kx022a_write_only_ranges[0],
127 .n_no_ranges = ARRAY_SIZE(kx022a_write_only_ranges),
128 };
129
130 static const struct regmap_range kx022a_noinc_read_ranges[] = {
131 {
132 .range_min = KX022A_REG_BUF_READ,
133 .range_max = KX022A_REG_BUF_READ,
134 },
135 };
136
137 static const struct regmap_access_table kx022a_nir_regs = {
138 .yes_ranges = &kx022a_noinc_read_ranges[0],
139 .n_yes_ranges = ARRAY_SIZE(kx022a_noinc_read_ranges),
140 };
141
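/*
 * Note how the access tables are wired below: the "read-only" ranges are given
 * to regmap as the write table's no_ranges (writes to them are rejected) and
 * the "write-only" ranges as the read table's no_ranges (reads from them are
 * rejected). The same pattern is used for the KX132 config further down.
 */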
142 static const struct regmap_config kx022a_regmap_config = {
143 .reg_bits = 8,
144 .val_bits = 8,
145 .volatile_table = &kx022a_volatile_regs,
146 .rd_table = &kx022a_wo_regs,
147 .wr_table = &kx022a_ro_regs,
148 .rd_noinc_table = &kx022a_nir_regs,
149 .precious_table = &kx022a_precious_regs,
150 .max_register = KX022A_MAX_REGISTER,
151 .cache_type = REGCACHE_RBTREE,
152 };
153
154 /* Regmap configs for the KX132 */
155 static const struct regmap_range kx132_volatile_ranges[] = {
156 {
157 .range_min = KX132_REG_XADP_L,
158 .range_max = KX132_REG_COTR,
159 }, {
160 .range_min = KX132_REG_TSCP,
161 .range_max = KX132_REG_INT_REL,
162 }, {
163 /* The reset bit will be cleared by sensor */
164 .range_min = KX132_REG_CNTL2,
165 .range_max = KX132_REG_CNTL2,
166 }, {
167 .range_min = KX132_REG_CNTL5,
168 .range_max = KX132_REG_CNTL5,
169 }, {
170 .range_min = KX132_REG_BUF_STATUS_1,
171 .range_max = KX132_REG_BUF_READ,
172 },
173 };
174
175 static const struct regmap_access_table kx132_volatile_regs = {
176 .yes_ranges = &kx132_volatile_ranges[0],
177 .n_yes_ranges = ARRAY_SIZE(kx132_volatile_ranges),
178 };
179
180 static const struct regmap_range kx132_precious_ranges[] = {
181 {
182 .range_min = KX132_REG_INT_REL,
183 .range_max = KX132_REG_INT_REL,
184 },
185 };
186
187 static const struct regmap_access_table kx132_precious_regs = {
188 .yes_ranges = &kx132_precious_ranges[0],
189 .n_yes_ranges = ARRAY_SIZE(kx132_precious_ranges),
190 };
191
192 static const struct regmap_range kx132_read_only_ranges[] = {
193 {
194 .range_min = KX132_REG_XADP_L,
195 .range_max = KX132_REG_INT_REL,
196 }, {
197 .range_min = KX132_REG_BUF_STATUS_1,
198 .range_max = KX132_REG_BUF_STATUS_2,
199 }, {
200 .range_min = KX132_REG_BUF_READ,
201 .range_max = KX132_REG_BUF_READ,
202 }, {
203 /* Kionix reserved registers: should not be written */
204 .range_min = 0x28,
205 .range_max = 0x28,
206 }, {
207 .range_min = 0x35,
208 .range_max = 0x36,
209 }, {
210 .range_min = 0x3c,
211 .range_max = 0x48,
212 }, {
213 .range_min = 0x4e,
214 .range_max = 0x5c,
215 }, {
216 .range_min = 0x77,
217 .range_max = 0x7f,
218 },
219 };
220
221 static const struct regmap_access_table kx132_ro_regs = {
222 .no_ranges = &kx132_read_only_ranges[0],
223 .n_no_ranges = ARRAY_SIZE(kx132_read_only_ranges),
224 };
225
226 static const struct regmap_range kx132_write_only_ranges[] = {
227 {
228 .range_min = KX132_REG_SELF_TEST,
229 .range_max = KX132_REG_SELF_TEST,
230 }, {
231 .range_min = KX132_REG_BUF_CLEAR,
232 .range_max = KX132_REG_BUF_CLEAR,
233 },
234 };
235
236 static const struct regmap_access_table kx132_wo_regs = {
237 .no_ranges = &kx132_write_only_ranges[0],
238 .n_no_ranges = ARRAY_SIZE(kx132_write_only_ranges),
239 };
240
241 static const struct regmap_range kx132_noinc_read_ranges[] = {
242 {
243 .range_min = KX132_REG_BUF_READ,
244 .range_max = KX132_REG_BUF_READ,
245 },
246 };
247
248 static const struct regmap_access_table kx132_nir_regs = {
249 .yes_ranges = &kx132_noinc_read_ranges[0],
250 .n_yes_ranges = ARRAY_SIZE(kx132_noinc_read_ranges),
251 };
252
253 static const struct regmap_config kx132_regmap_config = {
254 .reg_bits = 8,
255 .val_bits = 8,
256 .volatile_table = &kx132_volatile_regs,
257 .rd_table = &kx132_wo_regs,
258 .wr_table = &kx132_ro_regs,
259 .rd_noinc_table = &kx132_nir_regs,
260 .precious_table = &kx132_precious_regs,
261 .max_register = KX132_MAX_REGISTER,
262 .cache_type = REGCACHE_RBTREE,
263 };
264
265 struct kx022a_data {
266 struct regmap *regmap;
267 const struct kx022a_chip_info *chip_info;
268 struct iio_trigger *trig;
269 struct device *dev;
270 struct iio_mount_matrix orientation;
271 int64_t timestamp, old_timestamp;
272
273 int irq;
274 int inc_reg;
275 int ien_reg;
276
277 unsigned int state;
278 unsigned int odr_ns;
279
280 bool trigger_enabled;
281 /*
282 * Prevent toggling the sensor stby/active state (PC1 bit) in the
283 * middle of a configuration, or when the fifo is enabled. Also,
284 * protect the data stored/retrieved from this structure from
285 * concurrent accesses.
286 */
287 struct mutex mutex;
288 u8 watermark;
289
290 __le16 *fifo_buffer;
291
292 /* 3 x 16bit accel data + timestamp */
293 __le16 buffer[8] __aligned(IIO_DMA_MINALIGN);
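/*
 * Layout pushed to the IIO buffer in FIFO mode: 3 x 16-bit channels, padded
 * by the compiler to an 8-byte boundary, followed by the 64-bit timestamp
 * (consumed by iio_push_to_buffers_with_timestamp()).
 */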
294 struct {
295 __le16 channels[3];
296 aligned_s64 ts;
297 } scan;
298 };
299
300 static const struct iio_mount_matrix *
301 kx022a_get_mount_matrix(const struct iio_dev *idev,
302 const struct iio_chan_spec *chan)
303 {
304 struct kx022a_data *data = iio_priv(idev);
305
306 return &data->orientation;
307 }
308
309 enum {
310 AXIS_X,
311 AXIS_Y,
312 AXIS_Z,
313 AXIS_MAX
314 };
315
316 static const unsigned long kx022a_scan_masks[] = {
317 BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z), 0
318 };
319
320 static const struct iio_chan_spec_ext_info kx022a_ext_info[] = {
321 IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kx022a_get_mount_matrix),
322 { }
323 };
324
325 #define KX022A_ACCEL_CHAN(axis, reg, index) \
326 { \
327 .type = IIO_ACCEL, \
328 .modified = 1, \
329 .channel2 = IIO_MOD_##axis, \
330 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
331 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
332 BIT(IIO_CHAN_INFO_SAMP_FREQ), \
333 .info_mask_shared_by_type_available = \
334 BIT(IIO_CHAN_INFO_SCALE) | \
335 BIT(IIO_CHAN_INFO_SAMP_FREQ), \
336 .ext_info = kx022a_ext_info, \
337 .address = reg, \
338 .scan_index = index, \
339 .scan_type = { \
340 .sign = 's', \
341 .realbits = 16, \
342 .storagebits = 16, \
343 .endianness = IIO_LE, \
344 }, \
345 }
346
347 static const struct iio_chan_spec kx022a_channels[] = {
348 KX022A_ACCEL_CHAN(X, KX022A_REG_XOUT_L, 0),
349 KX022A_ACCEL_CHAN(Y, KX022A_REG_YOUT_L, 1),
350 KX022A_ACCEL_CHAN(Z, KX022A_REG_ZOUT_L, 2),
351 IIO_CHAN_SOFT_TIMESTAMP(3),
352 };
353
354 static const struct iio_chan_spec kx132_channels[] = {
355 KX022A_ACCEL_CHAN(X, KX132_REG_XOUT_L, 0),
356 KX022A_ACCEL_CHAN(Y, KX132_REG_YOUT_L, 1),
357 KX022A_ACCEL_CHAN(Z, KX132_REG_ZOUT_L, 2),
358 IIO_CHAN_SOFT_TIMESTAMP(3),
359 };
360
361 /*
362 * The sensor HW can support ODRs up to 1600 Hz, which is beyond what most
363 * Linux systems can handle without dropping samples. Also, the low power mode is
364 * not available for higher sample rates. Thus, the driver only supports 200 Hz
365 * and slower ODRs. The slowest is 0.78 Hz.
366 */
367 static const int kx022a_accel_samp_freq_table[][2] = {
368 { 0, 780000 },
369 { 1, 563000 },
370 { 3, 125000 },
371 { 6, 250000 },
372 { 12, 500000 },
373 { 25, 0 },
374 { 50, 0 },
375 { 100, 0 },
376 { 200, 0 },
377 };
378
379 static const unsigned int kx022a_odrs[] = {
380 1282051282,
381 639795266,
382 320 * MEGA,
383 160 * MEGA,
384 80 * MEGA,
385 40 * MEGA,
386 20 * MEGA,
387 10 * MEGA,
388 5 * MEGA,
389 };
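/*
 * Illustrative mapping between the two tables above (not new information, just
 * spelling out the correspondence): index n in kx022a_accel_samp_freq_table is
 * the value written to the ODCNTL ODR bits, and kx022a_odrs[n] is the matching
 * sample period in nanoseconds. For example n = 3 is 6.25 Hz, i.e. a period of
 * 160 ms = 160 * MEGA ns, and n = 8 is 200 Hz, i.e. 5 ms = 5 * MEGA ns.
 */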
390
391 /*
392 * The range is typically +/-2G/4G/8G/16G, distributed over the available bits.
393 * The scale table can be calculated using
394 * (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
395 * => KX022A uses 16 bit (HiRes mode - assume the low 8 bits are zeroed
396 * in low-power mode(?) )
397 * => +/-2G => 4 / 2^16 * 9.80665
398 * => +/-2G - 0.000598550415
399 * +/-4G - 0.00119710083
400 * +/-8G - 0.00239420166
401 * +/-16G - 0.00478840332
402 */
403 static const int kx022a_scale_table[][2] = {
404 { 0, 598550 },
405 { 0, 1197101 },
406 { 0, 2394202 },
407 { 0, 4788403 },
408 };
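/*
 * A sketch of how these values surface to user space (assuming the standard
 * IIO sysfs layout; the file name below is the generic IIO one, not verified
 * against this exact kernel version):
 *
 *   $ cat /sys/bus/iio/devices/iio:deviceX/in_accel_scale
 *   0.000598550
 *
 * With IIO_VAL_INT_PLUS_NANO the reported scale is val1 + val2 * 10^-9, so
 * { 0, 598550 } reads back as 0.000598550 m/s^2 per LSB.
 */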
409
410 static int kx022a_read_avail(struct iio_dev *indio_dev,
411 struct iio_chan_spec const *chan,
412 const int **vals, int *type, int *length,
413 long mask)
414 {
415 switch (mask) {
416 case IIO_CHAN_INFO_SAMP_FREQ:
417 *vals = (const int *)kx022a_accel_samp_freq_table;
418 *length = ARRAY_SIZE(kx022a_accel_samp_freq_table) *
419 ARRAY_SIZE(kx022a_accel_samp_freq_table[0]);
420 *type = IIO_VAL_INT_PLUS_MICRO;
421 return IIO_AVAIL_LIST;
422 case IIO_CHAN_INFO_SCALE:
423 *vals = (const int *)kx022a_scale_table;
424 *length = ARRAY_SIZE(kx022a_scale_table) *
425 ARRAY_SIZE(kx022a_scale_table[0]);
426 *type = IIO_VAL_INT_PLUS_NANO;
427 return IIO_AVAIL_LIST;
428 default:
429 return -EINVAL;
430 }
431 }
432
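/*
 * Default sample period used until user space sets a frequency: 20 ms, which
 * corresponds to a 50 Hz ODR (assumed here to match the sensor's power-on
 * default ODCNTL setting).
 */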
433 #define KX022A_DEFAULT_PERIOD_NS (20 * NSEC_PER_MSEC)
434
435 static void kx022a_reg2freq(unsigned int val, int *val1, int *val2)
436 {
437 *val1 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][0];
438 *val2 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][1];
439 }
440
441 static void kx022a_reg2scale(unsigned int val, unsigned int *val1,
442 unsigned int *val2)
443 {
444 val &= KX022A_MASK_GSEL;
445 val >>= KX022A_GSEL_SHIFT;
446
447 *val1 = kx022a_scale_table[val][0];
448 *val2 = kx022a_scale_table[val][1];
449 }
450
451 static int kx022a_turn_on_off_unlocked(struct kx022a_data *data, bool on)
452 {
453 int ret;
454
455 if (on)
456 ret = regmap_set_bits(data->regmap, data->chip_info->cntl,
457 KX022A_MASK_PC1);
458 else
459 ret = regmap_clear_bits(data->regmap, data->chip_info->cntl,
460 KX022A_MASK_PC1);
461 if (ret)
462 dev_err(data->dev, "Turn %s fail %d\n", str_on_off(on), ret);
463
464 return ret;
465 }
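/*
 * Locking convention for the two helpers below: kx022a_turn_off_lock() takes
 * data->mutex and, on success, returns with the sensor in stand-by and the
 * mutex still held (on failure it releases the mutex). kx022a_turn_on_unlock()
 * is the counterpart which re-activates the sensor and always drops the mutex.
 * Together they bracket the register reconfiguration sequences in this driver.
 */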
466
467 static int kx022a_turn_off_lock(struct kx022a_data *data)
468 {
469 int ret;
470
471 mutex_lock(&data->mutex);
472 ret = kx022a_turn_on_off_unlocked(data, false);
473 if (ret)
474 mutex_unlock(&data->mutex);
475
476 return ret;
477 }
478
479 static int kx022a_turn_on_unlock(struct kx022a_data *data)
480 {
481 int ret;
482
483 ret = kx022a_turn_on_off_unlocked(data, true);
484 mutex_unlock(&data->mutex);
485
486 return ret;
487 }
488
489 static int kx022a_write_raw_get_fmt(struct iio_dev *idev,
490 struct iio_chan_spec const *chan,
491 long mask)
492 {
493 switch (mask) {
494 case IIO_CHAN_INFO_SCALE:
495 return IIO_VAL_INT_PLUS_NANO;
496 case IIO_CHAN_INFO_SAMP_FREQ:
497 return IIO_VAL_INT_PLUS_MICRO;
498 default:
499 return -EINVAL;
500 }
501 }
502
503 static int kx022a_write_raw(struct iio_dev *idev,
504 struct iio_chan_spec const *chan,
505 int val, int val2, long mask)
506 {
507 struct kx022a_data *data = iio_priv(idev);
508 int ret, n;
509
510 /*
511 * We should not allow changing the scale or frequency when the FIFO is
512 * running, as it would mess up the timestamps/scale for the samples already
513 * in the buffer. If this turns out to be an issue we can later change the
514 * logic to internally flush the FIFO before reconfiguring so the samples in
515 * the FIFO keep matching the freq/scale settings. (Such a setup could cause
516 * issues if users trust the watermark to be reached within a known
517 * time-limit).
518 */
519 ret = iio_device_claim_direct_mode(idev);
520 if (ret)
521 return ret;
522
523 switch (mask) {
524 case IIO_CHAN_INFO_SAMP_FREQ:
525 n = ARRAY_SIZE(kx022a_accel_samp_freq_table);
526
527 while (n--)
528 if (val == kx022a_accel_samp_freq_table[n][0] &&
529 val2 == kx022a_accel_samp_freq_table[n][1])
530 break;
531 if (n < 0) {
532 ret = -EINVAL;
533 goto unlock_out;
534 }
535 ret = kx022a_turn_off_lock(data);
536 if (ret)
537 break;
538
539 ret = regmap_update_bits(data->regmap,
540 data->chip_info->odcntl,
541 KX022A_MASK_ODR, n);
542 data->odr_ns = kx022a_odrs[n];
543 kx022a_turn_on_unlock(data);
544 break;
545 case IIO_CHAN_INFO_SCALE:
546 n = ARRAY_SIZE(kx022a_scale_table);
547
548 while (n-- > 0)
549 if (val == kx022a_scale_table[n][0] &&
550 val2 == kx022a_scale_table[n][1])
551 break;
552 if (n < 0) {
553 ret = -EINVAL;
554 goto unlock_out;
555 }
556
557 ret = kx022a_turn_off_lock(data);
558 if (ret)
559 break;
560
561 ret = regmap_update_bits(data->regmap, data->chip_info->cntl,
562 KX022A_MASK_GSEL,
563 n << KX022A_GSEL_SHIFT);
564 kx022a_turn_on_unlock(data);
565 break;
566 default:
567 ret = -EINVAL;
568 break;
569 }
570
571 unlock_out:
572 iio_device_release_direct_mode(idev);
573
574 return ret;
575 }
576
577 static int kx022a_fifo_set_wmi(struct kx022a_data *data)
578 {
579 u8 threshold;
580
581 threshold = data->watermark;
582
583 return regmap_update_bits(data->regmap, data->chip_info->buf_cntl1,
584 KX022A_MASK_WM_TH, threshold);
585 }
586
587 static int kx022a_get_axis(struct kx022a_data *data,
588 struct iio_chan_spec const *chan,
589 int *val)
590 {
591 int ret;
592
593 ret = regmap_bulk_read(data->regmap, chan->address, &data->buffer[0],
594 sizeof(__le16));
595 if (ret)
596 return ret;
597
598 *val = (s16)le16_to_cpu(data->buffer[0]);
599
600 return IIO_VAL_INT;
601 }
602
603 static int kx022a_read_raw(struct iio_dev *idev,
604 struct iio_chan_spec const *chan,
605 int *val, int *val2, long mask)
606 {
607 struct kx022a_data *data = iio_priv(idev);
608 unsigned int regval;
609 int ret;
610
611 switch (mask) {
612 case IIO_CHAN_INFO_RAW:
613 ret = iio_device_claim_direct_mode(idev);
614 if (ret)
615 return ret;
616
617 mutex_lock(&data->mutex);
618 ret = kx022a_get_axis(data, chan, val);
619 mutex_unlock(&data->mutex);
620
621 iio_device_release_direct_mode(idev);
622
623 return ret;
624
625 case IIO_CHAN_INFO_SAMP_FREQ:
626 ret = regmap_read(data->regmap, data->chip_info->odcntl, &regval);
627 if (ret)
628 return ret;
629
630 if ((regval & KX022A_MASK_ODR) >=
631 ARRAY_SIZE(kx022a_accel_samp_freq_table)) {
632 dev_err(data->dev, "Invalid ODR\n");
633 return -EINVAL;
634 }
635
636 kx022a_reg2freq(regval, val, val2);
637
638 return IIO_VAL_INT_PLUS_MICRO;
639
640 case IIO_CHAN_INFO_SCALE:
641 ret = regmap_read(data->regmap, data->chip_info->cntl, &regval);
642 if (ret < 0)
643 return ret;
644
645 kx022a_reg2scale(regval, val, val2);
646
647 return IIO_VAL_INT_PLUS_NANO;
648 }
649
650 return -EINVAL;
651 };
652
653 static int kx022a_set_watermark(struct iio_dev *idev, unsigned int val)
654 {
655 struct kx022a_data *data = iio_priv(idev);
656
657 val = min(data->chip_info->fifo_length, val);
658
659 mutex_lock(&data->mutex);
660 data->watermark = val;
661 mutex_unlock(&data->mutex);
662
663 return 0;
664 }
665
666 static ssize_t hwfifo_enabled_show(struct device *dev,
667 struct device_attribute *attr,
668 char *buf)
669 {
670 struct iio_dev *idev = dev_to_iio_dev(dev);
671 struct kx022a_data *data = iio_priv(idev);
672 bool state;
673
674 mutex_lock(&data->mutex);
675 state = data->state;
676 mutex_unlock(&data->mutex);
677
678 return sysfs_emit(buf, "%d\n", state);
679 }
680
681 static ssize_t hwfifo_watermark_show(struct device *dev,
682 struct device_attribute *attr,
683 char *buf)
684 {
685 struct iio_dev *idev = dev_to_iio_dev(dev);
686 struct kx022a_data *data = iio_priv(idev);
687 int wm;
688
689 mutex_lock(&data->mutex);
690 wm = data->watermark;
691 mutex_unlock(&data->mutex);
692
693 return sysfs_emit(buf, "%d\n", wm);
694 }
695
696 static IIO_DEVICE_ATTR_RO(hwfifo_enabled, 0);
697 static IIO_DEVICE_ATTR_RO(hwfifo_watermark, 0);
698
699 static const struct iio_dev_attr *kx022a_fifo_attributes[] = {
700 &iio_dev_attr_hwfifo_watermark,
701 &iio_dev_attr_hwfifo_enabled,
702 NULL
703 };
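/*
 * These attributes appear under the device's buffer directory in sysfs. A
 * minimal usage sketch (paths follow the usual IIO layout; adjust deviceX and
 * verify against your kernel's IIO documentation):
 *
 *   echo 32 > /sys/bus/iio/devices/iio:deviceX/buffer/watermark
 *   cat /sys/bus/iio/devices/iio:deviceX/buffer/hwfifo_watermark
 *   cat /sys/bus/iio/devices/iio:deviceX/buffer/hwfifo_enabled
 *
 * Writing the generic watermark invokes kx022a_set_watermark(), which clamps
 * the value to the chip's FIFO length; kx022a_fifo_set_wmi() then programs it
 * to BUF_CNTL1 when the FIFO is enabled. The hwfifo_* files are read-only.
 */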
704
705 static int kx022a_drop_fifo_contents(struct kx022a_data *data)
706 {
707 /*
708 * We must clear the old timestamp to avoid computing the timestamps
709 * based on samples acquired when the buffer was last enabled.
710 *
711 * We don't need to protect the timestamp as long as we are only
712 * called from fifo-disable, where we can guarantee the sensor is not
713 * triggering interrupts and where the mutex is locked to prevent
714 * user-space access.
715 */
716 data->timestamp = 0;
717
718 return regmap_write(data->regmap, data->chip_info->buf_clear, 0x0);
719 }
720
721 static int kx022a_get_fifo_bytes_available(struct kx022a_data *data)
722 {
723 int ret, fifo_bytes;
724
725 ret = regmap_read(data->regmap, KX022A_REG_BUF_STATUS_1, &fifo_bytes);
726 if (ret) {
727 dev_err(data->dev, "Error reading buffer status\n");
728 return ret;
729 }
730
731 if (fifo_bytes == KX022A_FIFO_FULL_VALUE)
732 return KX022A_FIFO_MAX_BYTES;
733
734 return fifo_bytes;
735 }
736
737 static int kx132_get_fifo_bytes_available(struct kx022a_data *data)
738 {
739 __le16 buf_status;
740 int ret, fifo_bytes;
741
742 ret = regmap_bulk_read(data->regmap, data->chip_info->buf_status1,
743 &buf_status, sizeof(buf_status));
744 if (ret) {
745 dev_err(data->dev, "Error reading buffer status\n");
746 return ret;
747 }
748
749 fifo_bytes = le16_to_cpu(buf_status);
750 fifo_bytes &= data->chip_info->buf_smp_lvl_mask;
751 fifo_bytes = min((unsigned int)fifo_bytes, data->chip_info->fifo_length *
752 KX022A_FIFO_SAMPLES_SIZE_BYTES);
753
754 return fifo_bytes;
755 }
756
757 static int __kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples,
758 bool irq)
759 {
760 struct kx022a_data *data = iio_priv(idev);
761 uint64_t sample_period;
762 int count, fifo_bytes;
763 bool renable = false;
764 int64_t tstamp;
765 int ret, i;
766
767 fifo_bytes = data->chip_info->get_fifo_bytes_available(data);
768
769 if (fifo_bytes % KX022A_FIFO_SAMPLES_SIZE_BYTES)
770 dev_warn(data->dev, "Bad FIFO alignment. Data may be corrupt\n");
771
772 count = fifo_bytes / KX022A_FIFO_SAMPLES_SIZE_BYTES;
773 if (!count)
774 return 0;
775
776 /*
777 * If we are being called from IRQ handler we know the stored timestamp
778 * is fairly accurate for the last stored sample. Otherwise, if we are
779 * called as a result of a read operation from userspace and hence
780 * before the watermark interrupt was triggered, take a timestamp
781 * now. We can fall anywhere in between two samples so the error in this
782 * case is at most one sample period.
783 */
784 if (!irq) {
785 /*
786 * We need to have the IRQ disabled or we risk messing up
787 * the timestamps. If we are run from the IRQ, then the
788 * IRQF_ONESHOT has us covered - but if we are run by a
789 * user-space read we need to disable the IRQ to be on the safe
790 * side. We do this using a synchronous disable so that if the
791 * IRQ thread is running on another CPU we wait for it to
792 * finish.
793 */
794 disable_irq(data->irq);
795 renable = true;
796
797 data->old_timestamp = data->timestamp;
798 data->timestamp = iio_get_time_ns(idev);
799 }
800
801 /*
802 * Approximate timestamps for each of the samples based on the sampling
803 * frequency, the timestamp of the last sample and the number of samples.
804 *
805 * We'd better not use the current bandwidth settings to compute the
806 * sample period. The real sample rate varies with the device, and
807 * small variations add up when we store a large number of samples.
808 *
809 * To avoid this issue we compute the actual sample period ourselves
810 * based on the timestamp delta between the last two flush operations.
811 */
812 if (data->old_timestamp) {
813 sample_period = data->timestamp - data->old_timestamp;
814 do_div(sample_period, count);
815 } else {
816 sample_period = data->odr_ns;
817 }
818 tstamp = data->timestamp - (count - 1) * sample_period;
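/*
 * Illustration with made-up numbers: if the previous flush happened
 * 500 ms before this one and count is 25, sample_period becomes 20 ms
 * and the first sample pushed below is stamped 24 * 20 ms = 480 ms
 * before data->timestamp, with each following sample 20 ms later.
 */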
819
820 if (samples && count > samples) {
821 /*
822 * Here we leave some old samples in the buffer. We need to
823 * adjust the timestamp to match the first sample in the buffer
824 * or we will miscalculate the sample_period at next round.
825 */
826 data->timestamp -= (count - samples) * sample_period;
827 count = samples;
828 }
829
830 fifo_bytes = count * KX022A_FIFO_SAMPLES_SIZE_BYTES;
831 ret = regmap_noinc_read(data->regmap, data->chip_info->buf_read,
832 data->fifo_buffer, fifo_bytes);
833 if (ret)
834 goto renable_out;
835
836 for (i = 0; i < count; i++) {
837 __le16 *sam = &data->fifo_buffer[i * 3];
838 __le16 *chs;
839 int bit;
840
841 chs = &data->scan.channels[0];
842 for_each_set_bit(bit, idev->active_scan_mask, AXIS_MAX)
843 chs[bit] = sam[bit];
844
845 iio_push_to_buffers_with_timestamp(idev, &data->scan, tstamp);
846
847 tstamp += sample_period;
848 }
849
850 ret = count;
851
852 renable_out:
853 if (renable)
854 enable_irq(data->irq);
855
856 return ret;
857 }
858
859 static int kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples)
860 {
861 struct kx022a_data *data = iio_priv(idev);
862 int ret;
863
864 mutex_lock(&data->mutex);
865 ret = __kx022a_fifo_flush(idev, samples, false);
866 mutex_unlock(&data->mutex);
867
868 return ret;
869 }
870
871 static const struct iio_info kx022a_info = {
872 .read_raw = &kx022a_read_raw,
873 .write_raw = &kx022a_write_raw,
874 .write_raw_get_fmt = &kx022a_write_raw_get_fmt,
875 .read_avail = &kx022a_read_avail,
876
877 .validate_trigger = iio_validate_own_trigger,
878 .hwfifo_set_watermark = kx022a_set_watermark,
879 .hwfifo_flush_to_buffer = kx022a_fifo_flush,
880 };
881
882 static int kx022a_set_drdy_irq(struct kx022a_data *data, bool en)
883 {
884 if (en)
885 return regmap_set_bits(data->regmap, data->chip_info->cntl,
886 KX022A_MASK_DRDY);
887
888 return regmap_clear_bits(data->regmap, data->chip_info->cntl,
889 KX022A_MASK_DRDY);
890 }
891
892 static int kx022a_prepare_irq_pin(struct kx022a_data *data)
893 {
894 /* Enable the selected IRQ pin. Set polarity to active low */
895 int mask = KX022A_MASK_IEN | KX022A_MASK_IPOL |
896 KX022A_MASK_ITYP;
897 int val = KX022A_MASK_IEN | KX022A_IPOL_LOW |
898 KX022A_ITYP_LEVEL;
899 int ret;
900
901 ret = regmap_update_bits(data->regmap, data->inc_reg, mask, val);
902 if (ret)
903 return ret;
904
905 /* We enable WMI to IRQ pin only at buffer_enable */
906 mask = KX022A_MASK_INS2_DRDY;
907
908 return regmap_set_bits(data->regmap, data->ien_reg, mask);
909 }
910
911 static int kx022a_fifo_disable(struct kx022a_data *data)
912 {
913 int ret;
914
915 ret = kx022a_turn_off_lock(data);
916 if (ret)
917 return ret;
918
919 ret = regmap_clear_bits(data->regmap, data->ien_reg, KX022A_MASK_WMI);
920 if (ret)
921 goto unlock_out;
922
923 ret = regmap_clear_bits(data->regmap, data->chip_info->buf_cntl2,
924 KX022A_MASK_BUF_EN);
925 if (ret)
926 goto unlock_out;
927
928 data->state &= ~KX022A_STATE_FIFO;
929
930 kx022a_drop_fifo_contents(data);
931
932 kfree(data->fifo_buffer);
933
934 return kx022a_turn_on_unlock(data);
935
936 unlock_out:
937 mutex_unlock(&data->mutex);
938
939 return ret;
940 }
941
942 static int kx022a_buffer_predisable(struct iio_dev *idev)
943 {
944 struct kx022a_data *data = iio_priv(idev);
945
946 if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED)
947 return 0;
948
949 return kx022a_fifo_disable(data);
950 }
951
952 static int kx022a_fifo_enable(struct kx022a_data *data)
953 {
954 int ret;
955
956 data->fifo_buffer = kmalloc_array(data->chip_info->fifo_length,
957 KX022A_FIFO_SAMPLES_SIZE_BYTES,
958 GFP_KERNEL);
959 if (!data->fifo_buffer)
960 return -ENOMEM;
961
962 ret = kx022a_turn_off_lock(data);
963 if (ret)
964 return ret;
965
966 /* Update watermark to HW */
967 ret = kx022a_fifo_set_wmi(data);
968 if (ret)
969 goto unlock_out;
970
971 /* Enable buffer */
972 ret = regmap_set_bits(data->regmap, data->chip_info->buf_cntl2,
973 KX022A_MASK_BUF_EN);
974 if (ret)
975 goto unlock_out;
976
977 data->state |= KX022A_STATE_FIFO;
978 ret = regmap_set_bits(data->regmap, data->ien_reg,
979 KX022A_MASK_WMI);
980 if (ret)
981 goto unlock_out;
982
983 return kx022a_turn_on_unlock(data);
984
985 unlock_out:
986 mutex_unlock(&data->mutex);
987
988 return ret;
989 }
990
991 static int kx022a_buffer_postenable(struct iio_dev *idev)
992 {
993 struct kx022a_data *data = iio_priv(idev);
994
995 /*
996 * If we use the data-ready trigger, then the IRQ masks should be handled
997 * by the trigger enable and the hardware buffer is not used; instead, we
998 * just push results to the IIO FIFO when the data-ready trigger fires.
999 */
1000 if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED)
1001 return 0;
1002
1003 return kx022a_fifo_enable(data);
1004 }
1005
1006 static const struct iio_buffer_setup_ops kx022a_buffer_ops = {
1007 .postenable = kx022a_buffer_postenable,
1008 .predisable = kx022a_buffer_predisable,
1009 };
1010
1011 static irqreturn_t kx022a_trigger_handler(int irq, void *p)
1012 {
1013 struct iio_poll_func *pf = p;
1014 struct iio_dev *idev = pf->indio_dev;
1015 struct kx022a_data *data = iio_priv(idev);
1016 int ret;
1017
1018 ret = regmap_bulk_read(data->regmap, data->chip_info->xout_l, data->buffer,
1019 KX022A_FIFO_SAMPLES_SIZE_BYTES);
1020 if (ret < 0)
1021 goto err_read;
1022
1023 iio_push_to_buffers_with_timestamp(idev, data->buffer, data->timestamp);
1024 err_read:
1025 iio_trigger_notify_done(idev->trig);
1026
1027 return IRQ_HANDLED;
1028 }
1029
1030 /* Get timestamps and wake the thread if we need to read data */
1031 static irqreturn_t kx022a_irq_handler(int irq, void *private)
1032 {
1033 struct iio_dev *idev = private;
1034 struct kx022a_data *data = iio_priv(idev);
1035
1036 data->old_timestamp = data->timestamp;
1037 data->timestamp = iio_get_time_ns(idev);
1038
1039 if (data->state & KX022A_STATE_FIFO || data->trigger_enabled)
1040 return IRQ_WAKE_THREAD;
1041
1042 return IRQ_NONE;
1043 }
1044
1045 /*
1046 * WMI and data-ready IRQs are acked when results are read. If we add
1047 * TILT/WAKE or other IRQs, then we may need to implement the acking
1048 * (which is racy).
1049 */
1050 static irqreturn_t kx022a_irq_thread_handler(int irq, void *private)
1051 {
1052 struct iio_dev *idev = private;
1053 struct kx022a_data *data = iio_priv(idev);
1054 irqreturn_t ret = IRQ_NONE;
1055
1056 mutex_lock(&data->mutex);
1057
1058 if (data->trigger_enabled) {
1059 iio_trigger_poll_nested(data->trig);
1060 ret = IRQ_HANDLED;
1061 }
1062
1063 if (data->state & KX022A_STATE_FIFO) {
1064 int ok;
1065
1066 ok = __kx022a_fifo_flush(idev, data->chip_info->fifo_length, true);
1067 if (ok > 0)
1068 ret = IRQ_HANDLED;
1069 }
1070
1071 mutex_unlock(&data->mutex);
1072
1073 return ret;
1074 }
1075
1076 static int kx022a_trigger_set_state(struct iio_trigger *trig,
1077 bool state)
1078 {
1079 struct kx022a_data *data = iio_trigger_get_drvdata(trig);
1080 int ret = 0;
1081
1082 mutex_lock(&data->mutex);
1083
1084 if (data->trigger_enabled == state)
1085 goto unlock_out;
1086
1087 if (data->state & KX022A_STATE_FIFO) {
1088 dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
1089 ret = -EBUSY;
1090 goto unlock_out;
1091 }
1092
1093 ret = kx022a_turn_on_off_unlocked(data, false);
1094 if (ret)
1095 goto unlock_out;
1096
1097 data->trigger_enabled = state;
1098 ret = kx022a_set_drdy_irq(data, state);
1099 if (ret)
1100 goto unlock_out;
1101
1102 ret = kx022a_turn_on_off_unlocked(data, true);
1103
1104 unlock_out:
1105 mutex_unlock(&data->mutex);
1106
1107 return ret;
1108 }
1109
1110 static const struct iio_trigger_ops kx022a_trigger_ops = {
1111 .set_trigger_state = kx022a_trigger_set_state,
1112 };
1113
1114 static int kx022a_chip_init(struct kx022a_data *data)
1115 {
1116 int ret, val;
1117
1118 /* Reset the sensor */
1119 ret = regmap_write(data->regmap, data->chip_info->cntl2, KX022A_MASK_SRST);
1120 if (ret)
1121 return ret;
1122
1123 /*
1124 * I've seen I2C read failures if we poll too fast after the sensor
1125 * reset. A slight delay gives the I2C block time to recover.
1126 */
1127 msleep(1);
1128
1129 ret = regmap_read_poll_timeout(data->regmap, data->chip_info->cntl2, val,
1130 !(val & KX022A_MASK_SRST),
1131 KX022A_SOFT_RESET_WAIT_TIME_US,
1132 KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US);
1133 if (ret) {
1134 dev_err(data->dev, "Sensor reset %s\n",
1135 val & KX022A_MASK_SRST ? "timeout" : "fail#");
1136 return ret;
1137 }
1138
1139 ret = regmap_reinit_cache(data->regmap, data->chip_info->regmap_config);
1140 if (ret) {
1141 dev_err(data->dev, "Failed to reinit reg cache\n");
1142 return ret;
1143 }
1144
1145 /* Set data resolution to 16 bits */
1146 ret = regmap_set_bits(data->regmap, data->chip_info->buf_cntl2,
1147 KX022A_MASK_BRES16);
1148 if (ret) {
1149 dev_err(data->dev, "Failed to set data resolution\n");
1150 return ret;
1151 }
1152
1153 return kx022a_prepare_irq_pin(data);
1154 }
1155
1156 const struct kx022a_chip_info kx022a_chip_info = {
1157 .name = "kx022-accel",
1158 .regmap_config = &kx022a_regmap_config,
1159 .channels = kx022a_channels,
1160 .num_channels = ARRAY_SIZE(kx022a_channels),
1161 .fifo_length = KX022A_FIFO_LENGTH,
1162 .who = KX022A_REG_WHO,
1163 .id = KX022A_ID,
1164 .cntl = KX022A_REG_CNTL,
1165 .cntl2 = KX022A_REG_CNTL2,
1166 .odcntl = KX022A_REG_ODCNTL,
1167 .buf_cntl1 = KX022A_REG_BUF_CNTL1,
1168 .buf_cntl2 = KX022A_REG_BUF_CNTL2,
1169 .buf_clear = KX022A_REG_BUF_CLEAR,
1170 .buf_status1 = KX022A_REG_BUF_STATUS_1,
1171 .buf_read = KX022A_REG_BUF_READ,
1172 .inc1 = KX022A_REG_INC1,
1173 .inc4 = KX022A_REG_INC4,
1174 .inc5 = KX022A_REG_INC5,
1175 .inc6 = KX022A_REG_INC6,
1176 .xout_l = KX022A_REG_XOUT_L,
1177 .get_fifo_bytes_available = kx022a_get_fifo_bytes_available,
1178 };
1179 EXPORT_SYMBOL_NS_GPL(kx022a_chip_info, "IIO_KX022A");
1180
1181 const struct kx022a_chip_info kx132_chip_info = {
1182 .name = "kx132-1211",
1183 .regmap_config = &kx132_regmap_config,
1184 .channels = kx132_channels,
1185 .num_channels = ARRAY_SIZE(kx132_channels),
1186 .fifo_length = KX132_FIFO_LENGTH,
1187 .who = KX132_REG_WHO,
1188 .id = KX132_ID,
1189 .cntl = KX132_REG_CNTL,
1190 .cntl2 = KX132_REG_CNTL2,
1191 .odcntl = KX132_REG_ODCNTL,
1192 .buf_cntl1 = KX132_REG_BUF_CNTL1,
1193 .buf_cntl2 = KX132_REG_BUF_CNTL2,
1194 .buf_clear = KX132_REG_BUF_CLEAR,
1195 .buf_status1 = KX132_REG_BUF_STATUS_1,
1196 .buf_smp_lvl_mask = KX132_MASK_BUF_SMP_LVL,
1197 .buf_read = KX132_REG_BUF_READ,
1198 .inc1 = KX132_REG_INC1,
1199 .inc4 = KX132_REG_INC4,
1200 .inc5 = KX132_REG_INC5,
1201 .inc6 = KX132_REG_INC6,
1202 .xout_l = KX132_REG_XOUT_L,
1203 .get_fifo_bytes_available = kx132_get_fifo_bytes_available,
1204 };
1205 EXPORT_SYMBOL_NS_GPL(kx132_chip_info, "IIO_KX022A");
1206
1207 /*
1208 * Despite the naming, the KX132ACR-LBZ is not similar to the KX132-1211 but
1209 * is an exact subset of the KX022A. The KX132ACR-LBZ is meant to be used for
1210 * industrial applications, and the tap/double tap, free fall and tilt engines
1211 * were removed. The rest of the registers and functionality (excluding the ID
1212 * register) are an exact match to what is found in the KX022A.
1213 */
1214 const struct kx022a_chip_info kx132acr_chip_info = {
1215 .name = "kx132acr-lbz",
1216 .regmap_config = &kx022a_regmap_config,
1217 .channels = kx022a_channels,
1218 .num_channels = ARRAY_SIZE(kx022a_channels),
1219 .fifo_length = KX022A_FIFO_LENGTH,
1220 .who = KX022A_REG_WHO,
1221 .id = KX132ACR_LBZ_ID,
1222 .cntl = KX022A_REG_CNTL,
1223 .cntl2 = KX022A_REG_CNTL2,
1224 .odcntl = KX022A_REG_ODCNTL,
1225 .buf_cntl1 = KX022A_REG_BUF_CNTL1,
1226 .buf_cntl2 = KX022A_REG_BUF_CNTL2,
1227 .buf_clear = KX022A_REG_BUF_CLEAR,
1228 .buf_status1 = KX022A_REG_BUF_STATUS_1,
1229 .buf_read = KX022A_REG_BUF_READ,
1230 .inc1 = KX022A_REG_INC1,
1231 .inc4 = KX022A_REG_INC4,
1232 .inc5 = KX022A_REG_INC5,
1233 .inc6 = KX022A_REG_INC6,
1234 .xout_l = KX022A_REG_XOUT_L,
1235 .get_fifo_bytes_available = kx022a_get_fifo_bytes_available,
1236 };
1237 EXPORT_SYMBOL_NS_GPL(kx132acr_chip_info, "IIO_KX022A");
1238
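/*
 * For reference, a device tree fragment along these lines is what the probe
 * below expects (illustrative only - the node address and the compatible
 * string should be checked against the kionix,kx022a devicetree bindings):
 *
 *	accelerometer@1f {
 *		compatible = "kionix,kx022a";
 *		reg = <0x1f>;
 *		interrupt-parent = <&gpio1>;
 *		interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
 *		interrupt-names = "INT1";
 *		io-vdd-supply = <&vdd_io>;
 *		vdd-supply = <&vdd_3v3>;
 *	};
 *
 * The "INT1"/"INT2" interrupt names and the "io-vdd"/"vdd" supplies map
 * directly to the fwnode_irq_get_byname() and regulator lookups below.
 */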
1239 int kx022a_probe_internal(struct device *dev, const struct kx022a_chip_info *chip_info)
1240 {
1241 static const char * const regulator_names[] = {"io-vdd", "vdd"};
1242 struct iio_trigger *indio_trig;
1243 struct fwnode_handle *fwnode;
1244 struct kx022a_data *data;
1245 struct regmap *regmap;
1246 unsigned int chip_id;
1247 struct iio_dev *idev;
1248 int ret, irq;
1249 char *name;
1250
1251 regmap = dev_get_regmap(dev, NULL);
1252 if (!regmap) {
1253 dev_err(dev, "no regmap\n");
1254 return -EINVAL;
1255 }
1256
1257 fwnode = dev_fwnode(dev);
1258 if (!fwnode)
1259 return -ENODEV;
1260
1261 idev = devm_iio_device_alloc(dev, sizeof(*data));
1262 if (!idev)
1263 return -ENOMEM;
1264
1265 data = iio_priv(idev);
1266 data->chip_info = chip_info;
1267
1268 /*
1269 * VDD is the analog and digital domain voltage supply and
1270 * IO_VDD is the digital I/O voltage supply.
1271 */
1272 ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
1273 regulator_names);
1274 if (ret && ret != -ENODEV)
1275 return dev_err_probe(dev, ret, "failed to enable regulator\n");
1276
1277 ret = regmap_read(regmap, chip_info->who, &chip_id);
1278 if (ret)
1279 return dev_err_probe(dev, ret, "Failed to access sensor\n");
1280
1281 if (chip_id != chip_info->id)
1282 dev_warn(dev, "unknown device 0x%x\n", chip_id);
1283
1284 irq = fwnode_irq_get_byname(fwnode, "INT1");
1285 if (irq > 0) {
1286 data->inc_reg = chip_info->inc1;
1287 data->ien_reg = chip_info->inc4;
1288 } else {
1289 irq = fwnode_irq_get_byname(fwnode, "INT2");
1290 if (irq < 0)
1291 return dev_err_probe(dev, irq, "No suitable IRQ\n");
1292
1293 data->inc_reg = chip_info->inc5;
1294 data->ien_reg = chip_info->inc6;
1295 }
1296
1297 data->regmap = regmap;
1298 data->dev = dev;
1299 data->irq = irq;
1300 data->odr_ns = KX022A_DEFAULT_PERIOD_NS;
1301 mutex_init(&data->mutex);
1302
1303 idev->channels = chip_info->channels;
1304 idev->num_channels = chip_info->num_channels;
1305 idev->name = chip_info->name;
1306 idev->info = &kx022a_info;
1307 idev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
1308 idev->available_scan_masks = kx022a_scan_masks;
1309
1310 /* Read the mounting matrix, if present */
1311 ret = iio_read_mount_matrix(dev, &data->orientation);
1312 if (ret)
1313 return ret;
1314
1315 /* The sensor must be turned off for configuration */
1316 ret = kx022a_turn_off_lock(data);
1317 if (ret)
1318 return ret;
1319
1320 ret = kx022a_chip_init(data);
1321 if (ret) {
1322 mutex_unlock(&data->mutex);
1323 return ret;
1324 }
1325
1326 ret = kx022a_turn_on_unlock(data);
1327 if (ret)
1328 return ret;
1329
1330 ret = devm_iio_triggered_buffer_setup_ext(dev, idev,
1331 &iio_pollfunc_store_time,
1332 kx022a_trigger_handler,
1333 IIO_BUFFER_DIRECTION_IN,
1334 &kx022a_buffer_ops,
1335 kx022a_fifo_attributes);
1336
1337 if (ret)
1338 return dev_err_probe(data->dev, ret,
1339 "iio_triggered_buffer_setup_ext FAIL\n");
1340 indio_trig = devm_iio_trigger_alloc(dev, "%sdata-rdy-dev%d", idev->name,
1341 iio_device_id(idev));
1342 if (!indio_trig)
1343 return -ENOMEM;
1344
1345 data->trig = indio_trig;
1346
1347 indio_trig->ops = &kx022a_trigger_ops;
1348 iio_trigger_set_drvdata(indio_trig, data);
1349
1350 /*
1351 * No need to check for NULL. request_threaded_irq() defaults to
1352 * dev_name() should the alloc fail.
1353 */
1354 name = devm_kasprintf(data->dev, GFP_KERNEL, "%s-kx022a",
1355 dev_name(data->dev));
1356
1357 ret = devm_request_threaded_irq(data->dev, irq, kx022a_irq_handler,
1358 &kx022a_irq_thread_handler,
1359 IRQF_ONESHOT, name, idev);
1360 if (ret)
1361 return dev_err_probe(data->dev, ret, "Could not request IRQ\n");
1362
1363 ret = devm_iio_trigger_register(dev, indio_trig);
1364 if (ret)
1365 return dev_err_probe(data->dev, ret,
1366 "Trigger registration failed\n");
1367
1368 ret = devm_iio_device_register(data->dev, idev);
1369 if (ret < 0)
1370 return dev_err_probe(dev, ret,
1371 "Unable to register iio device\n");
1372
1373 return ret;
1374 }
1375 EXPORT_SYMBOL_NS_GPL(kx022a_probe_internal, "IIO_KX022A");
1376
1377 MODULE_DESCRIPTION("ROHM/Kionix KX022A accelerometer driver");
1378 MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
1379 MODULE_LICENSE("GPL");
1380