1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * 3-axis accelerometer driver supporting many Bosch-Sensortec chips
4 * Copyright (c) 2014, Intel Corporation.
5 */
6
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

#include "bmc150-accel.h"
27
28 #define BMC150_ACCEL_REG_CHIP_ID 0x00
29
30 #define BMC150_ACCEL_REG_INT_STATUS_2 0x0B
31 #define BMC150_ACCEL_ANY_MOTION_MASK 0x07
32 #define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0)
33 #define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1)
34 #define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2)
35 #define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3)
36
37 #define BMC150_ACCEL_REG_PMU_LPW 0x11
38 #define BMC150_ACCEL_PMU_MODE_MASK 0xE0
39 #define BMC150_ACCEL_PMU_MODE_SHIFT 5
40 #define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_MASK 0x17
41 #define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT 1
42
43 #define BMC150_ACCEL_REG_PMU_RANGE 0x0F
44
45 #define BMC150_ACCEL_DEF_RANGE_2G 0x03
46 #define BMC150_ACCEL_DEF_RANGE_4G 0x05
47 #define BMC150_ACCEL_DEF_RANGE_8G 0x08
48 #define BMC150_ACCEL_DEF_RANGE_16G 0x0C
49
50 /* Default BW: 125Hz */
51 #define BMC150_ACCEL_REG_PMU_BW 0x10
52 #define BMC150_ACCEL_DEF_BW 125
53
54 #define BMC150_ACCEL_REG_RESET 0x14
55 #define BMC150_ACCEL_RESET_VAL 0xB6
56
57 #define BMC150_ACCEL_REG_INT_MAP_0 0x19
58 #define BMC150_ACCEL_INT_MAP_0_BIT_INT1_SLOPE BIT(2)
59
60 #define BMC150_ACCEL_REG_INT_MAP_1 0x1A
61 #define BMC150_ACCEL_INT_MAP_1_BIT_INT1_DATA BIT(0)
62 #define BMC150_ACCEL_INT_MAP_1_BIT_INT1_FWM BIT(1)
63 #define BMC150_ACCEL_INT_MAP_1_BIT_INT1_FFULL BIT(2)
64 #define BMC150_ACCEL_INT_MAP_1_BIT_INT2_FFULL BIT(5)
65 #define BMC150_ACCEL_INT_MAP_1_BIT_INT2_FWM BIT(6)
66 #define BMC150_ACCEL_INT_MAP_1_BIT_INT2_DATA BIT(7)
67
68 #define BMC150_ACCEL_REG_INT_MAP_2 0x1B
69 #define BMC150_ACCEL_INT_MAP_2_BIT_INT2_SLOPE BIT(2)
70
71 #define BMC150_ACCEL_REG_INT_RST_LATCH 0x21
72 #define BMC150_ACCEL_INT_MODE_LATCH_RESET 0x80
73 #define BMC150_ACCEL_INT_MODE_LATCH_INT 0x0F
74 #define BMC150_ACCEL_INT_MODE_NON_LATCH_INT 0x00
75
76 #define BMC150_ACCEL_REG_INT_EN_0 0x16
77 #define BMC150_ACCEL_INT_EN_BIT_SLP_X BIT(0)
78 #define BMC150_ACCEL_INT_EN_BIT_SLP_Y BIT(1)
79 #define BMC150_ACCEL_INT_EN_BIT_SLP_Z BIT(2)
80
81 #define BMC150_ACCEL_REG_INT_EN_1 0x17
82 #define BMC150_ACCEL_INT_EN_BIT_DATA_EN BIT(4)
83 #define BMC150_ACCEL_INT_EN_BIT_FFULL_EN BIT(5)
84 #define BMC150_ACCEL_INT_EN_BIT_FWM_EN BIT(6)
85
86 #define BMC150_ACCEL_REG_INT_OUT_CTRL 0x20
87 #define BMC150_ACCEL_INT_OUT_CTRL_INT1_LVL BIT(0)
88 #define BMC150_ACCEL_INT_OUT_CTRL_INT2_LVL BIT(2)
89
90 #define BMC150_ACCEL_REG_INT_5 0x27
91 #define BMC150_ACCEL_SLOPE_DUR_MASK 0x03
92
93 #define BMC150_ACCEL_REG_INT_6 0x28
94 #define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF
95
96 /* Slope duration in terms of number of samples */
97 #define BMC150_ACCEL_DEF_SLOPE_DURATION 1
98 /* in terms of multiples of g's/LSB, based on range */
99 #define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1
100
101 #define BMC150_ACCEL_REG_XOUT_L 0x02
102
103 #define BMC150_ACCEL_MAX_STARTUP_TIME_MS 100
104
105 /* Sleep Duration values */
106 #define BMC150_ACCEL_SLEEP_500_MICRO 0x05
107 #define BMC150_ACCEL_SLEEP_1_MS 0x06
108 #define BMC150_ACCEL_SLEEP_2_MS 0x07
109 #define BMC150_ACCEL_SLEEP_4_MS 0x08
110 #define BMC150_ACCEL_SLEEP_6_MS 0x09
111 #define BMC150_ACCEL_SLEEP_10_MS 0x0A
112 #define BMC150_ACCEL_SLEEP_25_MS 0x0B
113 #define BMC150_ACCEL_SLEEP_50_MS 0x0C
114 #define BMC150_ACCEL_SLEEP_100_MS 0x0D
115 #define BMC150_ACCEL_SLEEP_500_MS 0x0E
116 #define BMC150_ACCEL_SLEEP_1_SEC 0x0F
117
118 #define BMC150_ACCEL_REG_TEMP 0x08
119 #define BMC150_ACCEL_TEMP_CENTER_VAL 23
120
121 #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
122 #define BMC150_AUTO_SUSPEND_DELAY_MS 2000
123
124 #define BMC150_ACCEL_REG_FIFO_STATUS 0x0E
125 #define BMC150_ACCEL_REG_FIFO_CONFIG0 0x30
126 #define BMC150_ACCEL_REG_FIFO_CONFIG1 0x3E
127 #define BMC150_ACCEL_REG_FIFO_DATA 0x3F
128 #define BMC150_ACCEL_FIFO_LENGTH 32
129
130 enum bmc150_accel_axis {
131 AXIS_X,
132 AXIS_Y,
133 AXIS_Z,
134 AXIS_MAX,
135 };
136
137 enum bmc150_power_modes {
138 BMC150_ACCEL_SLEEP_MODE_NORMAL,
139 BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND,
140 BMC150_ACCEL_SLEEP_MODE_LPM,
141 BMC150_ACCEL_SLEEP_MODE_SUSPEND = 0x04,
142 };
143
144 struct bmc150_scale_info {
145 int scale;
146 u8 reg_range;
147 };
148
149 struct bmc150_accel_chip_info {
150 const char *name;
151 u8 chip_id;
152 const struct iio_chan_spec *channels;
153 int num_channels;
154 const struct bmc150_scale_info scale_table[4];
155 };
156
157 static const struct {
158 int val;
159 int val2;
160 u8 bw_bits;
161 } bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
162 {31, 260000, 0x09},
163 {62, 500000, 0x0A},
164 {125, 0, 0x0B},
165 {250, 0, 0x0C},
166 {500, 0, 0x0D},
167 {1000, 0, 0x0E},
168 {2000, 0, 0x0F} };
169
170 static __maybe_unused const struct {
171 int bw_bits;
172 int msec;
173 } bmc150_accel_sample_upd_time[] = { {0x08, 64},
174 {0x09, 32},
175 {0x0A, 16},
176 {0x0B, 8},
177 {0x0C, 4},
178 {0x0D, 2},
179 {0x0E, 1},
180 {0x0F, 1} };
181
182 static const struct {
183 int sleep_dur;
184 u8 reg_value;
185 } bmc150_accel_sleep_value_table[] = { {0, 0},
186 {500, BMC150_ACCEL_SLEEP_500_MICRO},
187 {1000, BMC150_ACCEL_SLEEP_1_MS},
188 {2000, BMC150_ACCEL_SLEEP_2_MS},
189 {4000, BMC150_ACCEL_SLEEP_4_MS},
190 {6000, BMC150_ACCEL_SLEEP_6_MS},
191 {10000, BMC150_ACCEL_SLEEP_10_MS},
192 {25000, BMC150_ACCEL_SLEEP_25_MS},
193 {50000, BMC150_ACCEL_SLEEP_50_MS},
194 {100000, BMC150_ACCEL_SLEEP_100_MS},
195 {500000, BMC150_ACCEL_SLEEP_500_MS},
196 {1000000, BMC150_ACCEL_SLEEP_1_SEC} };
197
198 const struct regmap_config bmc150_regmap_conf = {
199 .reg_bits = 8,
200 .val_bits = 8,
201 .max_register = 0x3f,
202 };
203 EXPORT_SYMBOL_NS_GPL(bmc150_regmap_conf, "IIO_BMC150");
204
bmc150_accel_set_mode(struct bmc150_accel_data * data,enum bmc150_power_modes mode,int dur_us)205 static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
206 enum bmc150_power_modes mode,
207 int dur_us)
208 {
209 struct device *dev = regmap_get_device(data->regmap);
210 int i;
211 int ret;
212 u8 lpw_bits;
213 int dur_val = -1;
214
215 if (dur_us > 0) {
216 for (i = 0; i < ARRAY_SIZE(bmc150_accel_sleep_value_table);
217 ++i) {
218 if (bmc150_accel_sleep_value_table[i].sleep_dur ==
219 dur_us)
220 dur_val =
221 bmc150_accel_sleep_value_table[i].reg_value;
222 }
223 } else {
224 dur_val = 0;
225 }
226
227 if (dur_val < 0)
228 return -EINVAL;
229
230 lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
231 lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);
232
233 dev_dbg(dev, "Set Mode bits %x\n", lpw_bits);
234
235 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
236 if (ret < 0) {
237 dev_err(dev, "Error writing reg_pmu_lpw\n");
238 return ret;
239 }
240
241 return 0;
242 }
243
bmc150_accel_set_bw(struct bmc150_accel_data * data,int val,int val2)244 static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
245 int val2)
246 {
247 int i;
248 int ret;
249
250 for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
251 if (bmc150_accel_samp_freq_table[i].val == val &&
252 bmc150_accel_samp_freq_table[i].val2 == val2) {
253 ret = regmap_write(data->regmap,
254 BMC150_ACCEL_REG_PMU_BW,
255 bmc150_accel_samp_freq_table[i].bw_bits);
256 if (ret < 0)
257 return ret;
258
259 data->bw_bits =
260 bmc150_accel_samp_freq_table[i].bw_bits;
261 return 0;
262 }
263 }
264
265 return -EINVAL;
266 }
267
bmc150_accel_update_slope(struct bmc150_accel_data * data)268 static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
269 {
270 struct device *dev = regmap_get_device(data->regmap);
271 int ret;
272
273 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
274 data->slope_thres);
275 if (ret < 0) {
276 dev_err(dev, "Error writing reg_int_6\n");
277 return ret;
278 }
279
280 ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
281 BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
282 if (ret < 0) {
283 dev_err(dev, "Error updating reg_int_5\n");
284 return ret;
285 }
286
287 dev_dbg(dev, "%x %x\n", data->slope_thres, data->slope_dur);
288
289 return ret;
290 }
291
bmc150_accel_any_motion_setup(struct bmc150_accel_trigger * t,bool state)292 static int bmc150_accel_any_motion_setup(struct bmc150_accel_trigger *t,
293 bool state)
294 {
295 if (state)
296 return bmc150_accel_update_slope(t->data);
297
298 return 0;
299 }
300
bmc150_accel_get_bw(struct bmc150_accel_data * data,int * val,int * val2)301 static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
302 int *val2)
303 {
304 int i;
305
306 for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
307 if (bmc150_accel_samp_freq_table[i].bw_bits == data->bw_bits) {
308 *val = bmc150_accel_samp_freq_table[i].val;
309 *val2 = bmc150_accel_samp_freq_table[i].val2;
310 return IIO_VAL_INT_PLUS_MICRO;
311 }
312 }
313
314 return -EINVAL;
315 }
316
317 #ifdef CONFIG_PM
bmc150_accel_get_startup_times(struct bmc150_accel_data * data)318 static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
319 {
320 int i;
321
322 for (i = 0; i < ARRAY_SIZE(bmc150_accel_sample_upd_time); ++i) {
323 if (bmc150_accel_sample_upd_time[i].bw_bits == data->bw_bits)
324 return bmc150_accel_sample_upd_time[i].msec;
325 }
326
327 return BMC150_ACCEL_MAX_STARTUP_TIME_MS;
328 }
329
bmc150_accel_set_power_state(struct bmc150_accel_data * data,bool on)330 static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
331 {
332 struct device *dev = regmap_get_device(data->regmap);
333 int ret;
334
335 if (on)
336 ret = pm_runtime_resume_and_get(dev);
337 else
338 ret = pm_runtime_put_autosuspend(dev);
339 if (ret < 0) {
340 dev_err(dev,
341 "Failed: %s for %d\n", __func__, on);
342 return ret;
343 }
344
345 return 0;
346 }
347 #else
bmc150_accel_set_power_state(struct bmc150_accel_data * data,bool on)348 static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
349 {
350 return 0;
351 }
352 #endif
353
354 #ifdef CONFIG_ACPI
355 /*
356 * Support for getting accelerometer information from BOSC0200 ACPI nodes.
357 *
358 * There are 2 variants of the BOSC0200 ACPI node. Some 2-in-1s with 360 degree
359 * hinges declare 2 I2C ACPI-resources for 2 accelerometers, 1 in the display
360 * and 1 in the base of the 2-in-1. On these 2-in-1s the ROMS ACPI object
361 * contains the mount-matrix for the sensor in the display and ROMK contains
362 * the mount-matrix for the sensor in the base. On devices using a single
363 * sensor there is a ROTM ACPI object which contains the mount-matrix.
364 *
365 * Here is an incomplete list of devices known to use 1 of these setups:
366 *
367 * Yoga devices with 2 accelerometers using ROMS + ROMK for the mount-matrices:
368 * Lenovo Thinkpad Yoga 11e 3th gen
369 * Lenovo Thinkpad Yoga 11e 4th gen
370 *
371 * Tablets using a single accelerometer using ROTM for the mount-matrix:
372 * Chuwi Hi8 Pro (CWI513)
373 * Chuwi Vi8 Plus (CWI519)
374 * Chuwi Hi13
375 * Irbis TW90
376 * Jumper EZpad mini 3
377 * Onda V80 plus
378 * Predia Basic Tablet
379 */
bmc150_apply_bosc0200_acpi_orientation(struct device * dev,struct iio_mount_matrix * orientation)380 static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
381 struct iio_mount_matrix *orientation)
382 {
383 struct iio_dev *indio_dev = dev_get_drvdata(dev);
384 acpi_handle handle = ACPI_HANDLE(dev);
385 char *name, *alt_name, *label;
386
387 if (strcmp(dev_name(dev), "i2c-BOSC0200:base") == 0) {
388 alt_name = "ROMK";
389 label = "accel-base";
390 } else {
391 alt_name = "ROMS";
392 label = "accel-display";
393 }
394
395 if (acpi_has_method(handle, "ROTM")) {
396 name = "ROTM";
397 } else if (acpi_has_method(handle, alt_name)) {
398 name = alt_name;
399 indio_dev->label = label;
400 } else {
401 return false;
402 }
403
404 return iio_read_acpi_mount_matrix(dev, orientation, name);
405 }
406
bmc150_apply_dual250e_acpi_orientation(struct device * dev,struct iio_mount_matrix * orientation)407 static bool bmc150_apply_dual250e_acpi_orientation(struct device *dev,
408 struct iio_mount_matrix *orientation)
409 {
410 struct iio_dev *indio_dev = dev_get_drvdata(dev);
411
412 if (strcmp(dev_name(dev), "i2c-DUAL250E:base") == 0)
413 indio_dev->label = "accel-base";
414 else
415 indio_dev->label = "accel-display";
416
417 return false; /* DUAL250E fwnodes have no mount matrix info */
418 }
419
bmc150_apply_acpi_orientation(struct device * dev,struct iio_mount_matrix * orientation)420 static bool bmc150_apply_acpi_orientation(struct device *dev,
421 struct iio_mount_matrix *orientation)
422 {
423 struct acpi_device *adev = ACPI_COMPANION(dev);
424
425 if (adev && acpi_dev_hid_uid_match(adev, "BOSC0200", NULL))
426 return bmc150_apply_bosc0200_acpi_orientation(dev, orientation);
427
428 if (adev && acpi_dev_hid_uid_match(adev, "DUAL250E", NULL))
429 return bmc150_apply_dual250e_acpi_orientation(dev, orientation);
430
431 return false;
432 }
433 #else
bmc150_apply_acpi_orientation(struct device * dev,struct iio_mount_matrix * orientation)434 static bool bmc150_apply_acpi_orientation(struct device *dev,
435 struct iio_mount_matrix *orientation)
436 {
437 return false;
438 }
439 #endif
440
441 struct bmc150_accel_interrupt_info {
442 u8 map_reg;
443 u8 map_bitmask;
444 u8 en_reg;
445 u8 en_bitmask;
446 };
447
448 static const struct bmc150_accel_interrupt_info
449 bmc150_accel_interrupts_int1[BMC150_ACCEL_INTERRUPTS] = {
450 { /* data ready interrupt */
451 .map_reg = BMC150_ACCEL_REG_INT_MAP_1,
452 .map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT1_DATA,
453 .en_reg = BMC150_ACCEL_REG_INT_EN_1,
454 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
455 },
456 { /* motion interrupt */
457 .map_reg = BMC150_ACCEL_REG_INT_MAP_0,
458 .map_bitmask = BMC150_ACCEL_INT_MAP_0_BIT_INT1_SLOPE,
459 .en_reg = BMC150_ACCEL_REG_INT_EN_0,
460 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_SLP_X |
461 BMC150_ACCEL_INT_EN_BIT_SLP_Y |
462 BMC150_ACCEL_INT_EN_BIT_SLP_Z
463 },
464 { /* fifo watermark interrupt */
465 .map_reg = BMC150_ACCEL_REG_INT_MAP_1,
466 .map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT1_FWM,
467 .en_reg = BMC150_ACCEL_REG_INT_EN_1,
468 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
469 },
470 };
471
472 static const struct bmc150_accel_interrupt_info
473 bmc150_accel_interrupts_int2[BMC150_ACCEL_INTERRUPTS] = {
474 { /* data ready interrupt */
475 .map_reg = BMC150_ACCEL_REG_INT_MAP_1,
476 .map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT2_DATA,
477 .en_reg = BMC150_ACCEL_REG_INT_EN_1,
478 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
479 },
480 { /* motion interrupt */
481 .map_reg = BMC150_ACCEL_REG_INT_MAP_2,
482 .map_bitmask = BMC150_ACCEL_INT_MAP_2_BIT_INT2_SLOPE,
483 .en_reg = BMC150_ACCEL_REG_INT_EN_0,
484 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_SLP_X |
485 BMC150_ACCEL_INT_EN_BIT_SLP_Y |
486 BMC150_ACCEL_INT_EN_BIT_SLP_Z
487 },
488 { /* fifo watermark interrupt */
489 .map_reg = BMC150_ACCEL_REG_INT_MAP_1,
490 .map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT2_FWM,
491 .en_reg = BMC150_ACCEL_REG_INT_EN_1,
492 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
493 },
494 };
495
bmc150_accel_interrupts_setup(struct iio_dev * indio_dev,struct bmc150_accel_data * data,int irq)496 static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
497 struct bmc150_accel_data *data, int irq)
498 {
499 const struct bmc150_accel_interrupt_info *irq_info = NULL;
500 struct device *dev = regmap_get_device(data->regmap);
501 int i;
502
503 /*
504 * For now we map all interrupts to the same output pin.
505 * However, some boards may have just INT2 (and not INT1) connected,
506 * so we try to detect which IRQ it is based on the interrupt-names.
507 * Without interrupt-names, we assume the irq belongs to INT1.
508 */
509 irq_info = bmc150_accel_interrupts_int1;
510 if (data->type == BOSCH_BMC156 ||
511 irq == fwnode_irq_get_byname(dev_fwnode(dev), "INT2"))
512 irq_info = bmc150_accel_interrupts_int2;
513
514 for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++)
515 data->interrupts[i].info = &irq_info[i];
516 }
517
bmc150_accel_set_interrupt(struct bmc150_accel_data * data,int i,bool state)518 static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
519 bool state)
520 {
521 struct device *dev = regmap_get_device(data->regmap);
522 struct bmc150_accel_interrupt *intr = &data->interrupts[i];
523 const struct bmc150_accel_interrupt_info *info = intr->info;
524 int ret;
525
526 /* We do not always have an IRQ */
527 if (data->irq <= 0)
528 return 0;
529
530 if (state) {
531 if (atomic_inc_return(&intr->users) > 1)
532 return 0;
533 } else {
534 if (atomic_dec_return(&intr->users) > 0)
535 return 0;
536 }
537
538 /*
539 * We will expect the enable and disable to do operation in reverse
540 * order. This will happen here anyway, as our resume operation uses
541 * sync mode runtime pm calls. The suspend operation will be delayed
542 * by autosuspend delay.
543 * So the disable operation will still happen in reverse order of
544 * enable operation. When runtime pm is disabled the mode is always on,
545 * so sequence doesn't matter.
546 */
547 ret = bmc150_accel_set_power_state(data, state);
548 if (ret < 0)
549 return ret;
550
551 /* map the interrupt to the appropriate pins */
552 ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
553 (state ? info->map_bitmask : 0));
554 if (ret < 0) {
555 dev_err(dev, "Error updating reg_int_map\n");
556 goto out_fix_power_state;
557 }
558
559 /* enable/disable the interrupt */
560 ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
561 (state ? info->en_bitmask : 0));
562 if (ret < 0) {
563 dev_err(dev, "Error updating reg_int_en\n");
564 goto out_fix_power_state;
565 }
566
567 return 0;
568
569 out_fix_power_state:
570 bmc150_accel_set_power_state(data, false);
571 return ret;
572 }
573
bmc150_accel_set_scale(struct bmc150_accel_data * data,int val)574 static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
575 {
576 struct device *dev = regmap_get_device(data->regmap);
577 int ret, i;
578
579 for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
580 if (data->chip_info->scale_table[i].scale == val) {
581 ret = regmap_write(data->regmap,
582 BMC150_ACCEL_REG_PMU_RANGE,
583 data->chip_info->scale_table[i].reg_range);
584 if (ret < 0) {
585 dev_err(dev, "Error writing pmu_range\n");
586 return ret;
587 }
588
589 data->range = data->chip_info->scale_table[i].reg_range;
590 return 0;
591 }
592 }
593
594 return -EINVAL;
595 }
596
bmc150_accel_get_temp(struct bmc150_accel_data * data,int * val)597 static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
598 {
599 struct device *dev = regmap_get_device(data->regmap);
600 int ret;
601 unsigned int value;
602
603 mutex_lock(&data->mutex);
604
605 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
606 if (ret < 0) {
607 dev_err(dev, "Error reading reg_temp\n");
608 mutex_unlock(&data->mutex);
609 return ret;
610 }
611 *val = sign_extend32(value, 7);
612
613 mutex_unlock(&data->mutex);
614
615 return IIO_VAL_INT;
616 }
617
bmc150_accel_get_axis(struct bmc150_accel_data * data,struct iio_chan_spec const * chan,int * val)618 static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
619 struct iio_chan_spec const *chan,
620 int *val)
621 {
622 struct device *dev = regmap_get_device(data->regmap);
623 int ret;
624 int axis = chan->scan_index;
625 __le16 raw_val;
626
627 mutex_lock(&data->mutex);
628 ret = bmc150_accel_set_power_state(data, true);
629 if (ret < 0) {
630 mutex_unlock(&data->mutex);
631 return ret;
632 }
633
634 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
635 &raw_val, sizeof(raw_val));
636 if (ret < 0) {
637 dev_err(dev, "Error reading axis %d\n", axis);
638 bmc150_accel_set_power_state(data, false);
639 mutex_unlock(&data->mutex);
640 return ret;
641 }
642 *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
643 chan->scan_type.realbits - 1);
644 ret = bmc150_accel_set_power_state(data, false);
645 mutex_unlock(&data->mutex);
646 if (ret < 0)
647 return ret;
648
649 return IIO_VAL_INT;
650 }
651
bmc150_accel_read_raw(struct iio_dev * indio_dev,struct iio_chan_spec const * chan,int * val,int * val2,long mask)652 static int bmc150_accel_read_raw(struct iio_dev *indio_dev,
653 struct iio_chan_spec const *chan,
654 int *val, int *val2, long mask)
655 {
656 struct bmc150_accel_data *data = iio_priv(indio_dev);
657 int ret;
658
659 switch (mask) {
660 case IIO_CHAN_INFO_RAW:
661 switch (chan->type) {
662 case IIO_TEMP:
663 return bmc150_accel_get_temp(data, val);
664 case IIO_ACCEL:
665 if (iio_buffer_enabled(indio_dev))
666 return -EBUSY;
667 else
668 return bmc150_accel_get_axis(data, chan, val);
669 default:
670 return -EINVAL;
671 }
672 case IIO_CHAN_INFO_OFFSET:
673 if (chan->type == IIO_TEMP) {
674 *val = BMC150_ACCEL_TEMP_CENTER_VAL;
675 return IIO_VAL_INT;
676 } else {
677 return -EINVAL;
678 }
679 case IIO_CHAN_INFO_SCALE:
680 *val = 0;
681 switch (chan->type) {
682 case IIO_TEMP:
683 *val2 = 500000;
684 return IIO_VAL_INT_PLUS_MICRO;
685 case IIO_ACCEL:
686 {
687 int i;
688 const struct bmc150_scale_info *si;
689 int st_size = ARRAY_SIZE(data->chip_info->scale_table);
690
691 for (i = 0; i < st_size; ++i) {
692 si = &data->chip_info->scale_table[i];
693 if (si->reg_range == data->range) {
694 *val2 = si->scale;
695 return IIO_VAL_INT_PLUS_MICRO;
696 }
697 }
698 return -EINVAL;
699 }
700 default:
701 return -EINVAL;
702 }
703 case IIO_CHAN_INFO_SAMP_FREQ:
704 mutex_lock(&data->mutex);
705 ret = bmc150_accel_get_bw(data, val, val2);
706 mutex_unlock(&data->mutex);
707 return ret;
708 default:
709 return -EINVAL;
710 }
711 }
712
bmc150_accel_write_raw(struct iio_dev * indio_dev,struct iio_chan_spec const * chan,int val,int val2,long mask)713 static int bmc150_accel_write_raw(struct iio_dev *indio_dev,
714 struct iio_chan_spec const *chan,
715 int val, int val2, long mask)
716 {
717 struct bmc150_accel_data *data = iio_priv(indio_dev);
718 int ret;
719
720 switch (mask) {
721 case IIO_CHAN_INFO_SAMP_FREQ:
722 mutex_lock(&data->mutex);
723 ret = bmc150_accel_set_bw(data, val, val2);
724 mutex_unlock(&data->mutex);
725 break;
726 case IIO_CHAN_INFO_SCALE:
727 if (val)
728 return -EINVAL;
729
730 mutex_lock(&data->mutex);
731 ret = bmc150_accel_set_scale(data, val2);
732 mutex_unlock(&data->mutex);
733 return ret;
734 default:
735 ret = -EINVAL;
736 }
737
738 return ret;
739 }
740
bmc150_accel_read_event(struct iio_dev * indio_dev,const struct iio_chan_spec * chan,enum iio_event_type type,enum iio_event_direction dir,enum iio_event_info info,int * val,int * val2)741 static int bmc150_accel_read_event(struct iio_dev *indio_dev,
742 const struct iio_chan_spec *chan,
743 enum iio_event_type type,
744 enum iio_event_direction dir,
745 enum iio_event_info info,
746 int *val, int *val2)
747 {
748 struct bmc150_accel_data *data = iio_priv(indio_dev);
749
750 *val2 = 0;
751 switch (info) {
752 case IIO_EV_INFO_VALUE:
753 *val = data->slope_thres;
754 break;
755 case IIO_EV_INFO_PERIOD:
756 *val = data->slope_dur;
757 break;
758 default:
759 return -EINVAL;
760 }
761
762 return IIO_VAL_INT;
763 }
764
bmc150_accel_write_event(struct iio_dev * indio_dev,const struct iio_chan_spec * chan,enum iio_event_type type,enum iio_event_direction dir,enum iio_event_info info,int val,int val2)765 static int bmc150_accel_write_event(struct iio_dev *indio_dev,
766 const struct iio_chan_spec *chan,
767 enum iio_event_type type,
768 enum iio_event_direction dir,
769 enum iio_event_info info,
770 int val, int val2)
771 {
772 struct bmc150_accel_data *data = iio_priv(indio_dev);
773
774 if (data->ev_enable_state)
775 return -EBUSY;
776
777 switch (info) {
778 case IIO_EV_INFO_VALUE:
779 data->slope_thres = val & BMC150_ACCEL_SLOPE_THRES_MASK;
780 break;
781 case IIO_EV_INFO_PERIOD:
782 data->slope_dur = val & BMC150_ACCEL_SLOPE_DUR_MASK;
783 break;
784 default:
785 return -EINVAL;
786 }
787
788 return 0;
789 }
790
bmc150_accel_read_event_config(struct iio_dev * indio_dev,const struct iio_chan_spec * chan,enum iio_event_type type,enum iio_event_direction dir)791 static int bmc150_accel_read_event_config(struct iio_dev *indio_dev,
792 const struct iio_chan_spec *chan,
793 enum iio_event_type type,
794 enum iio_event_direction dir)
795 {
796 struct bmc150_accel_data *data = iio_priv(indio_dev);
797
798 return data->ev_enable_state;
799 }
800
bmc150_accel_write_event_config(struct iio_dev * indio_dev,const struct iio_chan_spec * chan,enum iio_event_type type,enum iio_event_direction dir,bool state)801 static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
802 const struct iio_chan_spec *chan,
803 enum iio_event_type type,
804 enum iio_event_direction dir,
805 bool state)
806 {
807 struct bmc150_accel_data *data = iio_priv(indio_dev);
808 int ret;
809
810 if (state == data->ev_enable_state)
811 return 0;
812
813 mutex_lock(&data->mutex);
814
815 ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_ANY_MOTION,
816 state);
817 if (ret < 0) {
818 mutex_unlock(&data->mutex);
819 return ret;
820 }
821
822 data->ev_enable_state = state;
823 mutex_unlock(&data->mutex);
824
825 return 0;
826 }
827
bmc150_accel_validate_trigger(struct iio_dev * indio_dev,struct iio_trigger * trig)828 static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
829 struct iio_trigger *trig)
830 {
831 struct bmc150_accel_data *data = iio_priv(indio_dev);
832 int i;
833
834 for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
835 if (data->triggers[i].indio_trig == trig)
836 return 0;
837 }
838
839 return -EINVAL;
840 }
841
bmc150_accel_get_fifo_watermark(struct device * dev,struct device_attribute * attr,char * buf)842 static ssize_t bmc150_accel_get_fifo_watermark(struct device *dev,
843 struct device_attribute *attr,
844 char *buf)
845 {
846 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
847 struct bmc150_accel_data *data = iio_priv(indio_dev);
848 int wm;
849
850 mutex_lock(&data->mutex);
851 wm = data->watermark;
852 mutex_unlock(&data->mutex);
853
854 return sprintf(buf, "%d\n", wm);
855 }
856
bmc150_accel_get_fifo_state(struct device * dev,struct device_attribute * attr,char * buf)857 static ssize_t bmc150_accel_get_fifo_state(struct device *dev,
858 struct device_attribute *attr,
859 char *buf)
860 {
861 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
862 struct bmc150_accel_data *data = iio_priv(indio_dev);
863 bool state;
864
865 mutex_lock(&data->mutex);
866 state = data->fifo_mode;
867 mutex_unlock(&data->mutex);
868
869 return sprintf(buf, "%d\n", state);
870 }
871
872 static const struct iio_mount_matrix *
bmc150_accel_get_mount_matrix(const struct iio_dev * indio_dev,const struct iio_chan_spec * chan)873 bmc150_accel_get_mount_matrix(const struct iio_dev *indio_dev,
874 const struct iio_chan_spec *chan)
875 {
876 struct bmc150_accel_data *data = iio_priv(indio_dev);
877
878 return &data->orientation;
879 }
880
881 static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = {
882 IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bmc150_accel_get_mount_matrix),
883 { }
884 };
885
886 IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_min, "1");
887 IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_max,
888 __stringify(BMC150_ACCEL_FIFO_LENGTH));
889 static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO,
890 bmc150_accel_get_fifo_state, NULL, 0);
891 static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO,
892 bmc150_accel_get_fifo_watermark, NULL, 0);
893
894 static const struct iio_dev_attr *bmc150_accel_fifo_attributes[] = {
895 &iio_dev_attr_hwfifo_watermark_min,
896 &iio_dev_attr_hwfifo_watermark_max,
897 &iio_dev_attr_hwfifo_watermark,
898 &iio_dev_attr_hwfifo_enabled,
899 NULL,
900 };
901
bmc150_accel_set_watermark(struct iio_dev * indio_dev,unsigned val)902 static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
903 {
904 struct bmc150_accel_data *data = iio_priv(indio_dev);
905
906 if (val > BMC150_ACCEL_FIFO_LENGTH)
907 val = BMC150_ACCEL_FIFO_LENGTH;
908
909 mutex_lock(&data->mutex);
910 data->watermark = val;
911 mutex_unlock(&data->mutex);
912
913 return 0;
914 }
915
916 /*
917 * We must read at least one full frame in one burst, otherwise the rest of the
918 * frame data is discarded.
919 */
bmc150_accel_fifo_transfer(struct bmc150_accel_data * data,char * buffer,int samples)920 static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
921 char *buffer, int samples)
922 {
923 struct device *dev = regmap_get_device(data->regmap);
924 int sample_length = 3 * 2;
925 int ret;
926 int total_length = samples * sample_length;
927
928 ret = regmap_raw_read(data->regmap, BMC150_ACCEL_REG_FIFO_DATA,
929 buffer, total_length);
930 if (ret)
931 dev_err(dev,
932 "Error transferring data from fifo: %d\n", ret);
933
934 return ret;
935 }
936
__bmc150_accel_fifo_flush(struct iio_dev * indio_dev,unsigned samples,bool irq)937 static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
938 unsigned samples, bool irq)
939 {
940 struct bmc150_accel_data *data = iio_priv(indio_dev);
941 struct device *dev = regmap_get_device(data->regmap);
942 int ret, i;
943 u8 count;
944 u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
945 int64_t tstamp;
946 uint64_t sample_period;
947 unsigned int val;
948
949 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
950 if (ret < 0) {
951 dev_err(dev, "Error reading reg_fifo_status\n");
952 return ret;
953 }
954
955 count = val & 0x7F;
956
957 if (!count)
958 return 0;
959
960 /*
961 * If we getting called from IRQ handler we know the stored timestamp is
962 * fairly accurate for the last stored sample. Otherwise, if we are
963 * called as a result of a read operation from userspace and hence
964 * before the watermark interrupt was triggered, take a timestamp
965 * now. We can fall anywhere in between two samples so the error in this
966 * case is at most one sample period.
967 */
968 if (!irq) {
969 data->old_timestamp = data->timestamp;
970 data->timestamp = iio_get_time_ns(indio_dev);
971 }
972
973 /*
974 * Approximate timestamps for each of the sample based on the sampling
975 * frequency, timestamp for last sample and number of samples.
976 *
977 * Note that we can't use the current bandwidth settings to compute the
978 * sample period because the sample rate varies with the device
979 * (e.g. between 31.70ms to 32.20ms for a bandwidth of 15.63HZ). That
980 * small variation adds when we store a large number of samples and
981 * creates significant jitter between the last and first samples in
982 * different batches (e.g. 32ms vs 21ms).
983 *
984 * To avoid this issue we compute the actual sample period ourselves
985 * based on the timestamp delta between the last two flush operations.
986 */
987 sample_period = (data->timestamp - data->old_timestamp);
988 do_div(sample_period, count);
989 tstamp = data->timestamp - (count - 1) * sample_period;
990
991 if (samples && count > samples)
992 count = samples;
993
994 ret = bmc150_accel_fifo_transfer(data, (u8 *)buffer, count);
995 if (ret)
996 return ret;
997
998 /*
999 * Ideally we want the IIO core to handle the demux when running in fifo
1000 * mode but not when running in triggered buffer mode. Unfortunately
1001 * this does not seem to be possible, so stick with driver demux for
1002 * now.
1003 */
1004 for (i = 0; i < count; i++) {
1005 int j, bit;
1006
1007 j = 0;
1008 iio_for_each_active_channel(indio_dev, bit)
1009 memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
1010 sizeof(data->scan.channels[0]));
1011
1012 iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
1013 tstamp);
1014
1015 tstamp += sample_period;
1016 }
1017
1018 return count;
1019 }
1020
/*
 * hwfifo_flush_to_buffer callback (sysfs-initiated flush).
 * Takes the driver mutex and flushes with irq == false so that
 * __bmc150_accel_fifo_flush() takes a fresh timestamp itself.
 */
static int bmc150_accel_fifo_flush(struct iio_dev *indio_dev, unsigned samples)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = __bmc150_accel_fifo_flush(indio_dev, samples, false);
	mutex_unlock(&data->mutex);

	return ret;
}
1032
/* Supported sampling frequencies (Hz), exposed read-only via sysfs. */
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
		"15.620000 31.260000 62.50000 125 250 500 1000 2000");

static struct attribute *bmc150_accel_attributes[] = {
	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
	NULL,
};

/* Attribute group hooked up through struct iio_info::attrs below. */
static const struct attribute_group bmc150_accel_attrs_group = {
	.attrs = bmc150_accel_attributes,
};
1044
/*
 * Rate-of-change (any-motion) event, either direction; threshold value,
 * enable flag and period are configurable per channel.
 */
static const struct iio_event_spec bmc150_accel_event = {
	.type = IIO_EV_TYPE_ROC,
	.dir = IIO_EV_DIR_EITHER,
	.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			 BIT(IIO_EV_INFO_ENABLE) |
			 BIT(IIO_EV_INFO_PERIOD)
};
1052
/*
 * One acceleration axis: per-channel raw reads; scale and sampling
 * frequency shared by type. Samples are signed, 'bits' significant bits
 * left-justified in 16-bit little-endian storage, with the ROC event
 * attached.
 */
#define BMC150_ACCEL_CHANNEL(_axis, bits) {				\
	.type = IIO_ACCEL,						\
	.modified = 1,							\
	.channel2 = IIO_MOD_##_axis,					\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
				BIT(IIO_CHAN_INFO_SAMP_FREQ),		\
	.scan_index = AXIS_##_axis,					\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = (bits),					\
		.storagebits = 16,					\
		.shift = 16 - (bits),					\
		.endianness = IIO_LE,					\
	},								\
	.ext_info = bmc150_accel_ext_info,				\
	.event_spec = &bmc150_accel_event,				\
	.num_event_specs = 1						\
}

/*
 * Complete channel set for one chip variant: a temperature channel
 * (scan_index -1: readable but not buffered), the three axes and a
 * software timestamp.
 */
#define BMC150_ACCEL_CHANNELS(bits) {					\
	{								\
		.type = IIO_TEMP,					\
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
				      BIT(IIO_CHAN_INFO_SCALE) |	\
				      BIT(IIO_CHAN_INFO_OFFSET),	\
		.scan_index = -1,					\
	},								\
	BMC150_ACCEL_CHANNEL(X, bits),					\
	BMC150_ACCEL_CHANNEL(Y, bits),					\
	BMC150_ACCEL_CHANNEL(Z, bits),					\
	IIO_CHAN_SOFT_TIMESTAMP(3),					\
}
1086
/* Channel tables for the 8-, 10-, 12- and 14-bit chip variants. */
static const struct iio_chan_spec bma222e_accel_channels[] =
	BMC150_ACCEL_CHANNELS(8);
static const struct iio_chan_spec bma250e_accel_channels[] =
	BMC150_ACCEL_CHANNELS(10);
static const struct iio_chan_spec bmc150_accel_channels[] =
	BMC150_ACCEL_CHANNELS(12);
static const struct iio_chan_spec bma280_accel_channels[] =
	BMC150_ACCEL_CHANNELS(14);
1095
/*
 * The range for the Bosch sensors is typically +-2g/4g/8g/16g, distributed
 * over the amount of bits (see above). The scale table can be calculated using
 *     (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
 * e.g. for +-2g and 12 bits: (4 / 2^12) * 9.80665 m/s^2 = 0.0095768... m/s^2
 * Multiply 10^6 and round to get the values listed below.
 *
 * .chip_id is the value read back from BMC150_ACCEL_REG_CHIP_ID and is
 * used in bmc150_accel_chip_init() to pick the matching entry.
 */
static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
	{
		.name = "BMA222",
		.chip_id = 0x03,
		.channels = bma222e_accel_channels,
		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
		.scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
				 {306458, BMC150_ACCEL_DEF_RANGE_4G},
				 {612916, BMC150_ACCEL_DEF_RANGE_8G},
				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA222E",
		.chip_id = 0xF8,
		.channels = bma222e_accel_channels,
		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
		.scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
				 {306458, BMC150_ACCEL_DEF_RANGE_4G},
				 {612916, BMC150_ACCEL_DEF_RANGE_8G},
				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA250E",
		.chip_id = 0xF9,
		.channels = bma250e_accel_channels,
		.num_channels = ARRAY_SIZE(bma250e_accel_channels),
		.scale_table = { {38307, BMC150_ACCEL_DEF_RANGE_2G},
				 {76614, BMC150_ACCEL_DEF_RANGE_4G},
				 {153229, BMC150_ACCEL_DEF_RANGE_8G},
				 {306458, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA253/BMA254/BMA255/BMC150/BMC156/BMI055",
		.chip_id = 0xFA,
		.channels = bmc150_accel_channels,
		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
		.scale_table = { {9577, BMC150_ACCEL_DEF_RANGE_2G},
				 {19154, BMC150_ACCEL_DEF_RANGE_4G},
				 {38307, BMC150_ACCEL_DEF_RANGE_8G},
				 {76614, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	{
		.name = "BMA280",
		.chip_id = 0xFB,
		.channels = bma280_accel_channels,
		.num_channels = ARRAY_SIZE(bma280_accel_channels),
		.scale_table = { {2394, BMC150_ACCEL_DEF_RANGE_2G},
				 {4788, BMC150_ACCEL_DEF_RANGE_4G},
				 {9577, BMC150_ACCEL_DEF_RANGE_8G},
				 {19154, BMC150_ACCEL_DEF_RANGE_16G} },
	},
};
1155
/* Core IIO callbacks, used when no hardware-FIFO support is available. */
static const struct iio_info bmc150_accel_info = {
	.attrs = &bmc150_accel_attrs_group,
	.read_raw = bmc150_accel_read_raw,
	.write_raw = bmc150_accel_write_raw,
	.read_event_value = bmc150_accel_read_event,
	.write_event_value = bmc150_accel_write_event,
	.write_event_config = bmc150_accel_write_event_config,
	.read_event_config = bmc150_accel_read_event_config,
};

/* Same callbacks plus hardware-FIFO (watermark/flush) support. */
static const struct iio_info bmc150_accel_info_fifo = {
	.attrs = &bmc150_accel_attrs_group,
	.read_raw = bmc150_accel_read_raw,
	.write_raw = bmc150_accel_write_raw,
	.read_event_value = bmc150_accel_read_event,
	.write_event_value = bmc150_accel_write_event,
	.write_event_config = bmc150_accel_write_event_config,
	.read_event_config = bmc150_accel_read_event_config,
	.validate_trigger = bmc150_accel_validate_trigger,
	.hwfifo_set_watermark = bmc150_accel_set_watermark,
	.hwfifo_flush_to_buffer = bmc150_accel_fifo_flush,
};

/* Only the all-axes scan is supported; no partial channel masks. */
static const unsigned long bmc150_accel_scan_masks[] = {
					BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
					0};
1182
bmc150_accel_trigger_handler(int irq,void * p)1183 static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
1184 {
1185 struct iio_poll_func *pf = p;
1186 struct iio_dev *indio_dev = pf->indio_dev;
1187 struct bmc150_accel_data *data = iio_priv(indio_dev);
1188 int ret;
1189
1190 mutex_lock(&data->mutex);
1191 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_REG_XOUT_L,
1192 data->buffer, AXIS_MAX * 2);
1193 mutex_unlock(&data->mutex);
1194 if (ret < 0)
1195 goto err_read;
1196
1197 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
1198 pf->timestamp);
1199 err_read:
1200 iio_trigger_notify_done(indio_dev->trig);
1201
1202 return IRQ_HANDLED;
1203 }
1204
bmc150_accel_trig_reen(struct iio_trigger * trig)1205 static void bmc150_accel_trig_reen(struct iio_trigger *trig)
1206 {
1207 struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
1208 struct bmc150_accel_data *data = t->data;
1209 struct device *dev = regmap_get_device(data->regmap);
1210 int ret;
1211
1212 /* new data interrupts don't need ack */
1213 if (t == &t->data->triggers[BMC150_ACCEL_TRIGGER_DATA_READY])
1214 return;
1215
1216 mutex_lock(&data->mutex);
1217 /* clear any latched interrupt */
1218 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1219 BMC150_ACCEL_INT_MODE_LATCH_INT |
1220 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1221 mutex_unlock(&data->mutex);
1222 if (ret < 0)
1223 dev_err(dev, "Error writing reg_int_rst_latch\n");
1224 }
1225
/*
 * Enable or disable a trigger. Runs the trigger-specific setup hook (if
 * any) and then flips the matching interrupt before recording the new
 * state. Uses a single goto-based unlock path instead of the previous
 * four duplicated mutex_unlock()/return sequences; behavior is unchanged.
 *
 * Returns 0 on success or a negative errno.
 */
static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
					  bool state)
{
	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
	struct bmc150_accel_data *data = t->data;
	int ret = 0;

	mutex_lock(&data->mutex);

	/* Nothing to do if the trigger is already in the requested state. */
	if (t->enabled == state)
		goto out_unlock;

	/* Trigger-specific hardware setup (e.g. any-motion slope config). */
	if (t->setup) {
		ret = t->setup(t, state);
		if (ret < 0)
			goto out_unlock;
	}

	ret = bmc150_accel_set_interrupt(data, t->intr, state);
	if (ret < 0)
		goto out_unlock;

	t->enabled = state;

out_unlock:
	mutex_unlock(&data->mutex);

	return ret;
}
1260
/* Trigger enable/disable plus re-arming of latched interrupts. */
static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
	.set_trigger_state = bmc150_accel_trigger_set_state,
	.reenable = bmc150_accel_trig_reen,
};
1265
bmc150_accel_handle_roc_event(struct iio_dev * indio_dev)1266 static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
1267 {
1268 struct bmc150_accel_data *data = iio_priv(indio_dev);
1269 struct device *dev = regmap_get_device(data->regmap);
1270 int dir;
1271 int ret;
1272 unsigned int val;
1273
1274 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
1275 if (ret < 0) {
1276 dev_err(dev, "Error reading reg_int_status_2\n");
1277 return ret;
1278 }
1279
1280 if (val & BMC150_ACCEL_ANY_MOTION_BIT_SIGN)
1281 dir = IIO_EV_DIR_FALLING;
1282 else
1283 dir = IIO_EV_DIR_RISING;
1284
1285 if (val & BMC150_ACCEL_ANY_MOTION_BIT_X)
1286 iio_push_event(indio_dev,
1287 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1288 0,
1289 IIO_MOD_X,
1290 IIO_EV_TYPE_ROC,
1291 dir),
1292 data->timestamp);
1293
1294 if (val & BMC150_ACCEL_ANY_MOTION_BIT_Y)
1295 iio_push_event(indio_dev,
1296 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1297 0,
1298 IIO_MOD_Y,
1299 IIO_EV_TYPE_ROC,
1300 dir),
1301 data->timestamp);
1302
1303 if (val & BMC150_ACCEL_ANY_MOTION_BIT_Z)
1304 iio_push_event(indio_dev,
1305 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1306 0,
1307 IIO_MOD_Z,
1308 IIO_EV_TYPE_ROC,
1309 dir),
1310 data->timestamp);
1311
1312 return ret;
1313 }
1314
/*
 * Threaded half of the interrupt: drains the hardware FIFO and/or pushes
 * ROC events, then resets the latched interrupt line if anything was
 * actually handled. Note 'ret' carries negative errnos / sample counts
 * first and is then reused for the irqreturn_t value.
 */
static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	bool ack = false;
	int ret;

	mutex_lock(&data->mutex);

	if (data->fifo_mode) {
		/* A positive return is the number of samples flushed. */
		ret = __bmc150_accel_fifo_flush(indio_dev,
						BMC150_ACCEL_FIFO_LENGTH, true);
		if (ret > 0)
			ack = true;
	}

	if (data->ev_enable_state) {
		ret = bmc150_accel_handle_roc_event(indio_dev);
		if (ret > 0)
			ack = true;
	}

	if (ack) {
		/* Re-arm the latched interrupt so the next one can fire. */
		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
				   BMC150_ACCEL_INT_MODE_LATCH_INT |
				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
		if (ret)
			dev_err(dev, "Error writing reg_int_rst_latch\n");

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	mutex_unlock(&data->mutex);

	return ret;
}
1354
bmc150_accel_irq_handler(int irq,void * private)1355 static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
1356 {
1357 struct iio_dev *indio_dev = private;
1358 struct bmc150_accel_data *data = iio_priv(indio_dev);
1359 bool ack = false;
1360 int i;
1361
1362 data->old_timestamp = data->timestamp;
1363 data->timestamp = iio_get_time_ns(indio_dev);
1364
1365 for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
1366 if (data->triggers[i].enabled) {
1367 iio_trigger_poll(data->triggers[i].indio_trig);
1368 ack = true;
1369 break;
1370 }
1371 }
1372
1373 if (data->ev_enable_state || data->fifo_mode)
1374 return IRQ_WAKE_THREAD;
1375
1376 if (ack)
1377 return IRQ_HANDLED;
1378
1379 return IRQ_NONE;
1380 }
1381
/*
 * Static description of the available triggers: .intr is the index passed
 * to bmc150_accel_set_interrupt(), .name the iio trigger name format, and
 * .setup an optional per-trigger hardware configuration hook.
 */
static const struct {
	int intr;
	const char *name;
	int (*setup)(struct bmc150_accel_trigger *t, bool state);
} bmc150_accel_triggers[BMC150_ACCEL_TRIGGERS] = {
	{
		.intr = 0,
		.name = "%s-dev%d",
	},
	{
		.intr = 1,
		.name = "%s-any-motion-dev%d",
		.setup = bmc150_accel_any_motion_setup,
	},
};
1397
bmc150_accel_unregister_triggers(struct bmc150_accel_data * data,int from)1398 static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
1399 int from)
1400 {
1401 int i;
1402
1403 for (i = from; i >= 0; i--) {
1404 if (data->triggers[i].indio_trig) {
1405 iio_trigger_unregister(data->triggers[i].indio_trig);
1406 data->triggers[i].indio_trig = NULL;
1407 }
1408 }
1409 }
1410
bmc150_accel_triggers_setup(struct iio_dev * indio_dev,struct bmc150_accel_data * data)1411 static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
1412 struct bmc150_accel_data *data)
1413 {
1414 struct device *dev = regmap_get_device(data->regmap);
1415 int i, ret;
1416
1417 for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
1418 struct bmc150_accel_trigger *t = &data->triggers[i];
1419
1420 t->indio_trig = devm_iio_trigger_alloc(dev,
1421 bmc150_accel_triggers[i].name,
1422 indio_dev->name,
1423 iio_device_id(indio_dev));
1424 if (!t->indio_trig) {
1425 ret = -ENOMEM;
1426 break;
1427 }
1428
1429 t->indio_trig->ops = &bmc150_accel_trigger_ops;
1430 t->intr = bmc150_accel_triggers[i].intr;
1431 t->data = data;
1432 t->setup = bmc150_accel_triggers[i].setup;
1433 iio_trigger_set_drvdata(t->indio_trig, t);
1434
1435 ret = iio_trigger_register(t->indio_trig);
1436 if (ret)
1437 break;
1438 }
1439
1440 if (ret)
1441 bmc150_accel_unregister_triggers(data, i - 1);
1442
1443 return ret;
1444 }
1445
/* Mode values written to BMC150_ACCEL_REG_FIFO_CONFIG1. */
#define BMC150_ACCEL_FIFO_MODE_STREAM          0x80
#define BMC150_ACCEL_FIFO_MODE_FIFO            0x40
#define BMC150_ACCEL_FIFO_MODE_BYPASS          0x00
1449
bmc150_accel_fifo_set_mode(struct bmc150_accel_data * data)1450 static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
1451 {
1452 struct device *dev = regmap_get_device(data->regmap);
1453 u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
1454 int ret;
1455
1456 ret = regmap_write(data->regmap, reg, data->fifo_mode);
1457 if (ret < 0) {
1458 dev_err(dev, "Error writing reg_fifo_config1\n");
1459 return ret;
1460 }
1461
1462 if (!data->fifo_mode)
1463 return 0;
1464
1465 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
1466 data->watermark);
1467 if (ret < 0)
1468 dev_err(dev, "Error writing reg_fifo_config0\n");
1469
1470 return ret;
1471 }
1472
/* Take a power-state reference before buffered capture starts. */
static int bmc150_accel_buffer_preenable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return bmc150_accel_set_power_state(data, true);
}
1479
/*
 * After the buffer is enabled, switch on hardware-FIFO capture: enable
 * the watermark interrupt, then program FIFO mode. Skipped entirely in
 * triggered-buffer mode or when no watermark has been set.
 */
static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret = 0;

	/* Triggered-buffer capture does not use the hardware FIFO. */
	if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
		return 0;

	mutex_lock(&data->mutex);

	if (!data->watermark)
		goto out;

	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
					 true);
	if (ret)
		goto out;

	data->fifo_mode = BMC150_ACCEL_FIFO_MODE_FIFO;

	ret = bmc150_accel_fifo_set_mode(data);
	if (ret) {
		/* Roll back: clear FIFO state and the watermark interrupt. */
		data->fifo_mode = 0;
		bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
					   false);
	}

out:
	mutex_unlock(&data->mutex);

	return ret;
}
1512
/*
 * Before the buffer is disabled: stop the watermark interrupt, flush the
 * remaining FIFO contents to the IIO buffer, then put the FIFO back into
 * bypass mode. No-op in triggered-buffer mode.
 */
static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
		return 0;

	mutex_lock(&data->mutex);

	if (!data->fifo_mode)
		goto out;

	bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK, false);
	__bmc150_accel_fifo_flush(indio_dev, BMC150_ACCEL_FIFO_LENGTH, false);
	data->fifo_mode = 0;
	bmc150_accel_fifo_set_mode(data);

out:
	mutex_unlock(&data->mutex);

	return 0;
}
1535
/* Drop the power-state reference taken in preenable. */
static int bmc150_accel_buffer_postdisable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return bmc150_accel_set_power_state(data, false);
}
1542
/* Buffer setup ops pairing power management with FIFO enable/disable. */
static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
	.preenable = bmc150_accel_buffer_preenable,
	.postenable = bmc150_accel_buffer_postenable,
	.predisable = bmc150_accel_buffer_predisable,
	.postdisable = bmc150_accel_buffer_postdisable,
};
1549
bmc150_accel_chip_init(struct bmc150_accel_data * data)1550 static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
1551 {
1552 struct device *dev = regmap_get_device(data->regmap);
1553 int ret, i;
1554 unsigned int val;
1555
1556 /*
1557 * Reset chip to get it in a known good state. A delay of 1.8ms after
1558 * reset is required according to the data sheets of supported chips.
1559 */
1560 regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
1561 BMC150_ACCEL_RESET_VAL);
1562 usleep_range(1800, 2500);
1563
1564 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
1565 if (ret < 0) {
1566 dev_err(dev, "Error: Reading chip id\n");
1567 return ret;
1568 }
1569
1570 dev_dbg(dev, "Chip Id %x\n", val);
1571 for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
1572 if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
1573 data->chip_info = &bmc150_accel_chip_info_tbl[i];
1574 break;
1575 }
1576 }
1577
1578 if (!data->chip_info) {
1579 dev_err(dev, "Invalid chip %x\n", val);
1580 return -ENODEV;
1581 }
1582
1583 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1584 if (ret < 0)
1585 return ret;
1586
1587 /* Set Bandwidth */
1588 ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
1589 if (ret < 0)
1590 return ret;
1591
1592 /* Set Default Range */
1593 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
1594 BMC150_ACCEL_DEF_RANGE_4G);
1595 if (ret < 0) {
1596 dev_err(dev, "Error writing reg_pmu_range\n");
1597 return ret;
1598 }
1599
1600 data->range = BMC150_ACCEL_DEF_RANGE_4G;
1601
1602 /* Set default slope duration and thresholds */
1603 data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
1604 data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
1605 ret = bmc150_accel_update_slope(data);
1606 if (ret < 0)
1607 return ret;
1608
1609 /* Set default as latched interrupts */
1610 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1611 BMC150_ACCEL_INT_MODE_LATCH_INT |
1612 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1613 if (ret < 0) {
1614 dev_err(dev, "Error writing reg_int_rst_latch\n");
1615 return ret;
1616 }
1617
1618 return 0;
1619 }
1620
/*
 * bmc150_accel_core_probe() - bus-agnostic probe shared by the I2C/SPI
 * front-end drivers.
 * @dev: parent device
 * @regmap: register map created by the bus-specific driver
 * @irq: interrupt line; <= 0 means no interrupt support
 * @type: chip type, stored in driver data for later use
 * @name: optional IIO device name override (NULL: use the chip name)
 * @block_supported: enable hardware-FIFO (software buffer) support
 *
 * Returns 0 on success or a negative errno.
 */
int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
			    enum bmc150_type type, const char *name,
			    bool block_supported)
{
	const struct iio_dev_attr **fifo_attrs;
	struct bmc150_accel_data *data;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	dev_set_drvdata(dev, indio_dev);

	data->regmap = regmap;
	data->type = type;

	/* ACPI-provided orientation wins; else read the mount matrix. */
	if (!bmc150_apply_acpi_orientation(dev, &data->orientation)) {
		ret = iio_read_mount_matrix(dev, &data->orientation);
		if (ret)
			return ret;
	}

	/*
	 * VDD is the analog and digital domain voltage supply
	 * VDDIO is the digital I/O voltage supply
	 */
	data->regulators[0].supply = "vdd";
	data->regulators[1].supply = "vddio";
	ret = devm_regulator_bulk_get(dev,
				      ARRAY_SIZE(data->regulators),
				      data->regulators);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get regulators\n");

	ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
				    data->regulators);
	if (ret) {
		dev_err(dev, "failed to enable regulators: %d\n", ret);
		return ret;
	}
	/*
	 * 2ms or 3ms power-on time according to datasheets, let's better
	 * be safe than sorry and set this delay to 5ms.
	 */
	msleep(5);

	ret = bmc150_accel_chip_init(data);
	if (ret < 0)
		goto err_disable_regulators;

	mutex_init(&data->mutex);

	indio_dev->channels = data->chip_info->channels;
	indio_dev->num_channels = data->chip_info->num_channels;
	indio_dev->name = name ? name : data->chip_info->name;
	indio_dev->available_scan_masks = bmc150_accel_scan_masks;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &bmc150_accel_info;

	/* Hardware FIFO adds software-buffer mode and extra sysfs attrs. */
	if (block_supported) {
		indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
		indio_dev->info = &bmc150_accel_info_fifo;
		fifo_attrs = bmc150_accel_fifo_attributes;
	} else {
		fifo_attrs = NULL;
	}

	ret = iio_triggered_buffer_setup_ext(indio_dev,
					     &iio_pollfunc_store_time,
					     bmc150_accel_trigger_handler,
					     IIO_BUFFER_DIRECTION_IN,
					     &bmc150_accel_buffer_ops,
					     fifo_attrs);
	if (ret < 0) {
		dev_err(dev, "Failed: iio triggered buffer setup\n");
		goto err_disable_regulators;
	}

	if (irq > 0) {
		data->irq = irq;
		ret = devm_request_threaded_irq(dev, irq,
						bmc150_accel_irq_handler,
						bmc150_accel_irq_thread_handler,
						IRQF_TRIGGER_RISING,
						"bmc150_accel_event",
						indio_dev);
		if (ret)
			goto err_buffer_cleanup;

		/*
		 * Set latched mode interrupt. While certain interrupts are
		 * non-latched regardless of this settings (e.g. new data) we
		 * want to use latch mode when we can to prevent interrupt
		 * flooding.
		 */
		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
		if (ret < 0) {
			dev_err(dev, "Error writing reg_int_rst_latch\n");
			goto err_buffer_cleanup;
		}

		bmc150_accel_interrupts_setup(indio_dev, data, irq);

		ret = bmc150_accel_triggers_setup(indio_dev, data);
		if (ret)
			goto err_buffer_cleanup;
	}

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto err_trigger_unregister;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, BMC150_AUTO_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	ret = iio_device_register(indio_dev);
	if (ret < 0) {
		dev_err(dev, "Unable to register iio device\n");
		goto err_pm_cleanup;
	}

	return 0;

	/* Unwind in reverse order of setup. Unregistering triggers is safe
	 * even when irq <= 0: the trigger slots stay NULL and are skipped. */
err_pm_cleanup:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
err_trigger_unregister:
	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
err_buffer_cleanup:
	iio_triggered_buffer_cleanup(indio_dev);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(data->regulators),
			       data->regulators);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(bmc150_accel_core_probe, "IIO_BMC150");
1763
/*
 * Tear down in reverse probe order: unregister the IIO device first so no
 * new userspace access can race the teardown, then runtime PM, triggers
 * and buffer; finally put the chip in deep suspend and cut the supplies.
 */
void bmc150_accel_core_remove(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);

	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);

	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);

	iio_triggered_buffer_cleanup(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND, 0);
	mutex_unlock(&data->mutex);

	regulator_bulk_disable(ARRAY_SIZE(data->regulators),
			       data->regulators);
}
EXPORT_SYMBOL_NS_GPL(bmc150_accel_core_remove, "IIO_BMC150");
1786
1787 #ifdef CONFIG_PM_SLEEP
/* System sleep: put the sensor into its suspend power mode. */
static int bmc150_accel_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
	mutex_unlock(&data->mutex);

	return 0;
}
1799
/*
 * System resume: restore normal power mode and reprogram the FIFO, then
 * invoke the bus driver's resume_callback if one was registered.
 */
static int bmc150_accel_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	bmc150_accel_fifo_set_mode(data);
	mutex_unlock(&data->mutex);

	if (data->resume_callback)
		data->resume_callback(dev);

	return 0;
}
1815 #endif
1816
1817 #ifdef CONFIG_PM
/*
 * Runtime suspend: enter suspend power mode. Any failure is mapped to
 * -EAGAIN so the runtime PM core will retry later.
 */
static int bmc150_accel_runtime_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
	if (ret < 0)
		return -EAGAIN;

	return 0;
}
1830
/*
 * Runtime resume: restore normal mode and the FIFO configuration, then
 * wait out the chip's startup time (in ms, from the chip-specific table)
 * before callers touch the hardware again.
 */
static int bmc150_accel_runtime_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;
	int sleep_val;

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	if (ret < 0)
		return ret;
	ret = bmc150_accel_fifo_set_mode(data);
	if (ret < 0)
		return ret;

	/* Short delays use usleep_range for accuracy, long ones msleep. */
	sleep_val = bmc150_accel_get_startup_times(data);
	if (sleep_val < 20)
		usleep_range(sleep_val * 1000, 20000);
	else
		msleep_interruptible(sleep_val);

	return 0;
}
1853 #endif
1854
/* PM ops shared with the bus front-ends: system sleep + runtime PM. */
const struct dev_pm_ops bmc150_accel_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(bmc150_accel_suspend, bmc150_accel_resume)
	SET_RUNTIME_PM_OPS(bmc150_accel_runtime_suspend,
			   bmc150_accel_runtime_resume, NULL)
};
EXPORT_SYMBOL_NS_GPL(bmc150_accel_pm_ops, "IIO_BMC150");

MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BMC150 accelerometer driver");
1865