1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * 3-axis accelerometer driver supporting many Bosch-Sensortec chips
4 * Copyright (c) 2014, Intel Corporation.
5 */
6
7 #include <linux/module.h>
8 #include <linux/i2c.h>
9 #include <linux/interrupt.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/acpi.h>
13 #include <linux/pm.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/property.h>
16 #include <linux/iio/iio.h>
17 #include <linux/iio/sysfs.h>
18 #include <linux/iio/buffer.h>
19 #include <linux/iio/events.h>
20 #include <linux/iio/trigger.h>
21 #include <linux/iio/trigger_consumer.h>
22 #include <linux/iio/triggered_buffer.h>
23 #include <linux/regmap.h>
24 #include <linux/regulator/consumer.h>
25
26 #include "bmc150-accel.h"
27
28 #define BMC150_ACCEL_REG_CHIP_ID 0x00
29
30 #define BMC150_ACCEL_REG_INT_STATUS_2 0x0B
31 #define BMC150_ACCEL_ANY_MOTION_MASK 0x07
32 #define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0)
33 #define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1)
34 #define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2)
35 #define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3)
36
37 #define BMC150_ACCEL_REG_PMU_LPW 0x11
38 #define BMC150_ACCEL_PMU_MODE_MASK 0xE0
39 #define BMC150_ACCEL_PMU_MODE_SHIFT 5
40 #define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_MASK 0x17
41 #define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT 1
42
43 #define BMC150_ACCEL_REG_PMU_RANGE 0x0F
44
45 #define BMC150_ACCEL_DEF_RANGE_2G 0x03
46 #define BMC150_ACCEL_DEF_RANGE_4G 0x05
47 #define BMC150_ACCEL_DEF_RANGE_8G 0x08
48 #define BMC150_ACCEL_DEF_RANGE_16G 0x0C
49
50 /* Default BW: 125Hz */
51 #define BMC150_ACCEL_REG_PMU_BW 0x10
52 #define BMC150_ACCEL_DEF_BW 125
53
54 #define BMC150_ACCEL_REG_RESET 0x14
55 #define BMC150_ACCEL_RESET_VAL 0xB6
56
57 #define BMC150_ACCEL_REG_INT_MAP_0 0x19
58 #define BMC150_ACCEL_INT_MAP_0_BIT_INT1_SLOPE BIT(2)
59
60 #define BMC150_ACCEL_REG_INT_MAP_1 0x1A
61 #define BMC150_ACCEL_INT_MAP_1_BIT_INT1_DATA BIT(0)
62 #define BMC150_ACCEL_INT_MAP_1_BIT_INT1_FWM BIT(1)
63 #define BMC150_ACCEL_INT_MAP_1_BIT_INT1_FFULL BIT(2)
64 #define BMC150_ACCEL_INT_MAP_1_BIT_INT2_FFULL BIT(5)
65 #define BMC150_ACCEL_INT_MAP_1_BIT_INT2_FWM BIT(6)
66 #define BMC150_ACCEL_INT_MAP_1_BIT_INT2_DATA BIT(7)
67
68 #define BMC150_ACCEL_REG_INT_MAP_2 0x1B
69 #define BMC150_ACCEL_INT_MAP_2_BIT_INT2_SLOPE BIT(2)
70
71 #define BMC150_ACCEL_REG_INT_RST_LATCH 0x21
72 #define BMC150_ACCEL_INT_MODE_LATCH_RESET 0x80
73 #define BMC150_ACCEL_INT_MODE_LATCH_INT 0x0F
74 #define BMC150_ACCEL_INT_MODE_NON_LATCH_INT 0x00
75
76 #define BMC150_ACCEL_REG_INT_EN_0 0x16
77 #define BMC150_ACCEL_INT_EN_BIT_SLP_X BIT(0)
78 #define BMC150_ACCEL_INT_EN_BIT_SLP_Y BIT(1)
79 #define BMC150_ACCEL_INT_EN_BIT_SLP_Z BIT(2)
80
81 #define BMC150_ACCEL_REG_INT_EN_1 0x17
82 #define BMC150_ACCEL_INT_EN_BIT_DATA_EN BIT(4)
83 #define BMC150_ACCEL_INT_EN_BIT_FFULL_EN BIT(5)
84 #define BMC150_ACCEL_INT_EN_BIT_FWM_EN BIT(6)
85
86 #define BMC150_ACCEL_REG_INT_OUT_CTRL 0x20
87 #define BMC150_ACCEL_INT_OUT_CTRL_INT1_LVL BIT(0)
88 #define BMC150_ACCEL_INT_OUT_CTRL_INT2_LVL BIT(2)
89
90 #define BMC150_ACCEL_REG_INT_5 0x27
91 #define BMC150_ACCEL_SLOPE_DUR_MASK 0x03
92
93 #define BMC150_ACCEL_REG_INT_6 0x28
94 #define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF
95
96 /* Slope duration in terms of number of samples */
97 #define BMC150_ACCEL_DEF_SLOPE_DURATION 1
98 /* in terms of multiples of g's/LSB, based on range */
99 #define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1
100
101 #define BMC150_ACCEL_REG_XOUT_L 0x02
102
103 #define BMC150_ACCEL_MAX_STARTUP_TIME_MS 100
104
105 /* Sleep Duration values */
106 #define BMC150_ACCEL_SLEEP_500_MICRO 0x05
107 #define BMC150_ACCEL_SLEEP_1_MS 0x06
108 #define BMC150_ACCEL_SLEEP_2_MS 0x07
109 #define BMC150_ACCEL_SLEEP_4_MS 0x08
110 #define BMC150_ACCEL_SLEEP_6_MS 0x09
111 #define BMC150_ACCEL_SLEEP_10_MS 0x0A
112 #define BMC150_ACCEL_SLEEP_25_MS 0x0B
113 #define BMC150_ACCEL_SLEEP_50_MS 0x0C
114 #define BMC150_ACCEL_SLEEP_100_MS 0x0D
115 #define BMC150_ACCEL_SLEEP_500_MS 0x0E
116 #define BMC150_ACCEL_SLEEP_1_SEC 0x0F
117
118 #define BMC150_ACCEL_REG_TEMP 0x08
119 #define BMC150_ACCEL_TEMP_CENTER_VAL 23
120
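/*
 * Each axis is reported as a 16-bit little-endian value, and the per-axis
 * data registers sit back to back starting at XOUT_L, hence the
 * "axis * 2" offset used by the helper below.
 */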
121 #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
122 #define BMC150_AUTO_SUSPEND_DELAY_MS 2000
123
124 #define BMC150_ACCEL_REG_FIFO_STATUS 0x0E
125 #define BMC150_ACCEL_REG_FIFO_CONFIG0 0x30
126 #define BMC150_ACCEL_REG_FIFO_CONFIG1 0x3E
127 #define BMC150_ACCEL_REG_FIFO_DATA 0x3F
128 #define BMC150_ACCEL_FIFO_LENGTH 32
129
130 enum bmc150_accel_axis {
131 AXIS_X,
132 AXIS_Y,
133 AXIS_Z,
134 AXIS_MAX,
135 };
136
137 enum bmc150_power_modes {
138 BMC150_ACCEL_SLEEP_MODE_NORMAL,
139 BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND,
140 BMC150_ACCEL_SLEEP_MODE_LPM,
141 BMC150_ACCEL_SLEEP_MODE_SUSPEND = 0x04,
142 };
143
144 struct bmc150_scale_info {
145 int scale;
146 u8 reg_range;
147 };
148
149 struct bmc150_accel_chip_info {
150 const char *name;
151 u8 chip_id;
152 const struct iio_chan_spec *channels;
153 int num_channels;
154 const struct bmc150_scale_info scale_table[4];
155 };
156
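/*
 * Output data rate selection: each bw_bits code (0x08..0x0F) roughly
 * doubles the sampling frequency, from 15.62 Hz up to 2 kHz. Values are
 * stored as Hz plus micro-Hz, matching the IIO_VAL_INT_PLUS_MICRO
 * convention used when they are reported.
 */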
157 static const struct {
158 int val;
159 int val2;
160 u8 bw_bits;
161 } bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
162 {31, 260000, 0x09},
163 {62, 500000, 0x0A},
164 {125, 0, 0x0B},
165 {250, 0, 0x0C},
166 {500, 0, 0x0D},
167 {1000, 0, 0x0E},
168 {2000, 0, 0x0F} };
169
170 static __maybe_unused const struct {
171 int bw_bits;
172 int msec;
173 } bmc150_accel_sample_upd_time[] = { {0x08, 64},
174 {0x09, 32},
175 {0x0A, 16},
176 {0x0B, 8},
177 {0x0C, 4},
178 {0x0D, 2},
179 {0x0E, 1},
180 {0x0F, 1} };
181
182 static const struct {
183 int sleep_dur;
184 u8 reg_value;
185 } bmc150_accel_sleep_value_table[] = { {0, 0},
186 {500, BMC150_ACCEL_SLEEP_500_MICRO},
187 {1000, BMC150_ACCEL_SLEEP_1_MS},
188 {2000, BMC150_ACCEL_SLEEP_2_MS},
189 {4000, BMC150_ACCEL_SLEEP_4_MS},
190 {6000, BMC150_ACCEL_SLEEP_6_MS},
191 {10000, BMC150_ACCEL_SLEEP_10_MS},
192 {25000, BMC150_ACCEL_SLEEP_25_MS},
193 {50000, BMC150_ACCEL_SLEEP_50_MS},
194 {100000, BMC150_ACCEL_SLEEP_100_MS},
195 {500000, BMC150_ACCEL_SLEEP_500_MS},
196 {1000000, BMC150_ACCEL_SLEEP_1_SEC} };
197
198 const struct regmap_config bmc150_regmap_conf = {
199 .reg_bits = 8,
200 .val_bits = 8,
201 .max_register = 0x3f,
202 };
203 EXPORT_SYMBOL_NS_GPL(bmc150_regmap_conf, "IIO_BMC150");
204
205 static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
206 enum bmc150_power_modes mode,
207 int dur_us)
208 {
209 struct device *dev = regmap_get_device(data->regmap);
210 int i;
211 int ret;
212 u8 lpw_bits;
213 int dur_val = -1;
214
215 if (dur_us > 0) {
216 for (i = 0; i < ARRAY_SIZE(bmc150_accel_sleep_value_table);
217 ++i) {
218 if (bmc150_accel_sleep_value_table[i].sleep_dur ==
219 dur_us)
220 dur_val =
221 bmc150_accel_sleep_value_table[i].reg_value;
222 }
223 } else {
224 dur_val = 0;
225 }
226
227 if (dur_val < 0)
228 return -EINVAL;
229
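	/*
	 * Build the PMU_LPW value: the power mode goes into bits 7:5 (see
	 * BMC150_ACCEL_PMU_MODE_SHIFT) and the sleep-duration code is
	 * shifted into place by BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT.
	 */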
230 lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
231 lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);
232
233 dev_dbg(dev, "Set Mode bits %x\n", lpw_bits);
234
235 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
236 if (ret < 0) {
237 dev_err(dev, "Error writing reg_pmu_lpw\n");
238 return ret;
239 }
240
241 return 0;
242 }
243
244 static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
245 int val2)
246 {
247 int i;
248 int ret;
249
250 for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
251 if (bmc150_accel_samp_freq_table[i].val == val &&
252 bmc150_accel_samp_freq_table[i].val2 == val2) {
253 ret = regmap_write(data->regmap,
254 BMC150_ACCEL_REG_PMU_BW,
255 bmc150_accel_samp_freq_table[i].bw_bits);
256 if (ret < 0)
257 return ret;
258
259 data->bw_bits =
260 bmc150_accel_samp_freq_table[i].bw_bits;
261 return 0;
262 }
263 }
264
265 return -EINVAL;
266 }
267
268 static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
269 {
270 struct device *dev = regmap_get_device(data->regmap);
271 int ret;
272
273 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
274 data->slope_thres);
275 if (ret < 0) {
276 dev_err(dev, "Error writing reg_int_6\n");
277 return ret;
278 }
279
280 ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
281 BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
282 if (ret < 0) {
283 dev_err(dev, "Error updating reg_int_5\n");
284 return ret;
285 }
286
287 dev_dbg(dev, "%x %x\n", data->slope_thres, data->slope_dur);
288
289 return ret;
290 }
291
292 static int bmc150_accel_any_motion_setup(struct bmc150_accel_trigger *t,
293 bool state)
294 {
295 if (state)
296 return bmc150_accel_update_slope(t->data);
297
298 return 0;
299 }
300
301 static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
302 int *val2)
303 {
304 int i;
305
306 for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
307 if (bmc150_accel_samp_freq_table[i].bw_bits == data->bw_bits) {
308 *val = bmc150_accel_samp_freq_table[i].val;
309 *val2 = bmc150_accel_samp_freq_table[i].val2;
310 return IIO_VAL_INT_PLUS_MICRO;
311 }
312 }
313
314 return -EINVAL;
315 }
316
317 #ifdef CONFIG_PM
318 static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
319 {
320 int i;
321
322 for (i = 0; i < ARRAY_SIZE(bmc150_accel_sample_upd_time); ++i) {
323 if (bmc150_accel_sample_upd_time[i].bw_bits == data->bw_bits)
324 return bmc150_accel_sample_upd_time[i].msec;
325 }
326
327 return BMC150_ACCEL_MAX_STARTUP_TIME_MS;
328 }
329
330 static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
331 {
332 struct device *dev = regmap_get_device(data->regmap);
333 int ret;
334
335 if (on) {
336 ret = pm_runtime_resume_and_get(dev);
337 } else {
338 pm_runtime_mark_last_busy(dev);
339 ret = pm_runtime_put_autosuspend(dev);
340 }
341
342 if (ret < 0) {
343 dev_err(dev,
344 "Failed: %s for %d\n", __func__, on);
345 return ret;
346 }
347
348 return 0;
349 }
350 #else
351 static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
352 {
353 return 0;
354 }
355 #endif
356
357 #ifdef CONFIG_ACPI
358 /*
359 * Support for getting accelerometer information from BOSC0200 ACPI nodes.
360 *
361 * There are 2 variants of the BOSC0200 ACPI node. Some 2-in-1s with 360 degree
362 * hinges declare 2 I2C ACPI-resources for 2 accelerometers, 1 in the display
363 * and 1 in the base of the 2-in-1. On these 2-in-1s the ROMS ACPI object
364 * contains the mount-matrix for the sensor in the display and ROMK contains
365 * the mount-matrix for the sensor in the base. On devices using a single
366 * sensor there is a ROTM ACPI object which contains the mount-matrix.
367 *
368 * Here is an incomplete list of devices known to use 1 of these setups:
369 *
370 * Yoga devices with 2 accelerometers using ROMS + ROMK for the mount-matrices:
371 * Lenovo Thinkpad Yoga 11e 3rd gen
372 * Lenovo Thinkpad Yoga 11e 4th gen
373 *
374 * Tablets using a single accelerometer using ROTM for the mount-matrix:
375 * Chuwi Hi8 Pro (CWI513)
376 * Chuwi Vi8 Plus (CWI519)
377 * Chuwi Hi13
378 * Irbis TW90
379 * Jumper EZpad mini 3
380 * Onda V80 plus
381 * Predia Basic Tablet
382 */
383 static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
384 struct iio_mount_matrix *orientation)
385 {
386 struct iio_dev *indio_dev = dev_get_drvdata(dev);
387 acpi_handle handle = ACPI_HANDLE(dev);
388 char *name, *alt_name, *label;
389
390 if (strcmp(dev_name(dev), "i2c-BOSC0200:base") == 0) {
391 alt_name = "ROMK";
392 label = "accel-base";
393 } else {
394 alt_name = "ROMS";
395 label = "accel-display";
396 }
397
398 if (acpi_has_method(handle, "ROTM")) {
399 name = "ROTM";
400 } else if (acpi_has_method(handle, alt_name)) {
401 name = alt_name;
402 indio_dev->label = label;
403 } else {
404 return false;
405 }
406
407 return iio_read_acpi_mount_matrix(dev, orientation, name);
408 }
409
410 static bool bmc150_apply_dual250e_acpi_orientation(struct device *dev,
411 struct iio_mount_matrix *orientation)
412 {
413 struct iio_dev *indio_dev = dev_get_drvdata(dev);
414
415 if (strcmp(dev_name(dev), "i2c-DUAL250E:base") == 0)
416 indio_dev->label = "accel-base";
417 else
418 indio_dev->label = "accel-display";
419
420 return false; /* DUAL250E fwnodes have no mount matrix info */
421 }
422
423 static bool bmc150_apply_acpi_orientation(struct device *dev,
424 struct iio_mount_matrix *orientation)
425 {
426 struct acpi_device *adev = ACPI_COMPANION(dev);
427
428 if (adev && acpi_dev_hid_uid_match(adev, "BOSC0200", NULL))
429 return bmc150_apply_bosc0200_acpi_orientation(dev, orientation);
430
431 if (adev && acpi_dev_hid_uid_match(adev, "DUAL250E", NULL))
432 return bmc150_apply_dual250e_acpi_orientation(dev, orientation);
433
434 return false;
435 }
436 #else
437 static bool bmc150_apply_acpi_orientation(struct device *dev,
438 struct iio_mount_matrix *orientation)
439 {
440 return false;
441 }
442 #endif
443
444 struct bmc150_accel_interrupt_info {
445 u8 map_reg;
446 u8 map_bitmask;
447 u8 en_reg;
448 u8 en_bitmask;
449 };
450
451 static const struct bmc150_accel_interrupt_info
452 bmc150_accel_interrupts_int1[BMC150_ACCEL_INTERRUPTS] = {
453 { /* data ready interrupt */
454 .map_reg = BMC150_ACCEL_REG_INT_MAP_1,
455 .map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT1_DATA,
456 .en_reg = BMC150_ACCEL_REG_INT_EN_1,
457 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
458 },
459 { /* motion interrupt */
460 .map_reg = BMC150_ACCEL_REG_INT_MAP_0,
461 .map_bitmask = BMC150_ACCEL_INT_MAP_0_BIT_INT1_SLOPE,
462 .en_reg = BMC150_ACCEL_REG_INT_EN_0,
463 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_SLP_X |
464 BMC150_ACCEL_INT_EN_BIT_SLP_Y |
465 BMC150_ACCEL_INT_EN_BIT_SLP_Z
466 },
467 { /* fifo watermark interrupt */
468 .map_reg = BMC150_ACCEL_REG_INT_MAP_1,
469 .map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT1_FWM,
470 .en_reg = BMC150_ACCEL_REG_INT_EN_1,
471 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
472 },
473 };
474
475 static const struct bmc150_accel_interrupt_info
476 bmc150_accel_interrupts_int2[BMC150_ACCEL_INTERRUPTS] = {
477 { /* data ready interrupt */
478 .map_reg = BMC150_ACCEL_REG_INT_MAP_1,
479 .map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT2_DATA,
480 .en_reg = BMC150_ACCEL_REG_INT_EN_1,
481 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
482 },
483 { /* motion interrupt */
484 .map_reg = BMC150_ACCEL_REG_INT_MAP_2,
485 .map_bitmask = BMC150_ACCEL_INT_MAP_2_BIT_INT2_SLOPE,
486 .en_reg = BMC150_ACCEL_REG_INT_EN_0,
487 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_SLP_X |
488 BMC150_ACCEL_INT_EN_BIT_SLP_Y |
489 BMC150_ACCEL_INT_EN_BIT_SLP_Z
490 },
491 { /* fifo watermark interrupt */
492 .map_reg = BMC150_ACCEL_REG_INT_MAP_1,
493 .map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_INT2_FWM,
494 .en_reg = BMC150_ACCEL_REG_INT_EN_1,
495 .en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
496 },
497 };
498
499 static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
500 struct bmc150_accel_data *data, int irq)
501 {
502 const struct bmc150_accel_interrupt_info *irq_info = NULL;
503 struct device *dev = regmap_get_device(data->regmap);
504 int i;
505
506 /*
507 * For now we map all interrupts to the same output pin.
508 * However, some boards may have just INT2 (and not INT1) connected,
509 * so we try to detect which IRQ it is based on the interrupt-names.
510 * Without interrupt-names, we assume the irq belongs to INT1.
511 */
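	/*
	 * Hypothetical devicetree fragment that would select the INT2
	 * mapping below (property names per the generic interrupt bindings):
	 *
	 *	interrupts = <...>;
	 *	interrupt-names = "INT2";
	 */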
512 irq_info = bmc150_accel_interrupts_int1;
513 if (data->type == BOSCH_BMC156 ||
514 irq == fwnode_irq_get_byname(dev_fwnode(dev), "INT2"))
515 irq_info = bmc150_accel_interrupts_int2;
516
517 for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++)
518 data->interrupts[i].info = &irq_info[i];
519 }
520
521 static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
522 bool state)
523 {
524 struct device *dev = regmap_get_device(data->regmap);
525 struct bmc150_accel_interrupt *intr = &data->interrupts[i];
526 const struct bmc150_accel_interrupt_info *info = intr->info;
527 int ret;
528
529 if (state) {
530 if (atomic_inc_return(&intr->users) > 1)
531 return 0;
532 } else {
533 if (atomic_dec_return(&intr->users) > 0)
534 return 0;
535 }
536
537 /*
538 * Enable and disable are expected to happen in reverse order. That
539 * is the case here anyway: our resume operation uses synchronous
540 * runtime PM calls, while the suspend operation is deferred by the
541 * autosuspend delay.
542 * So the disable still happens in reverse order of the enable. When
543 * runtime PM is disabled the mode is always on, so the sequence
544 * doesn't matter.
545 */
546 ret = bmc150_accel_set_power_state(data, state);
547 if (ret < 0)
548 return ret;
549
550 /* map the interrupt to the appropriate pins */
551 ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
552 (state ? info->map_bitmask : 0));
553 if (ret < 0) {
554 dev_err(dev, "Error updating reg_int_map\n");
555 goto out_fix_power_state;
556 }
557
558 /* enable/disable the interrupt */
559 ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
560 (state ? info->en_bitmask : 0));
561 if (ret < 0) {
562 dev_err(dev, "Error updating reg_int_en\n");
563 goto out_fix_power_state;
564 }
565
566 return 0;
567
568 out_fix_power_state:
569 bmc150_accel_set_power_state(data, false);
570 return ret;
571 }
572
573 static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
574 {
575 struct device *dev = regmap_get_device(data->regmap);
576 int ret, i;
577
578 for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
579 if (data->chip_info->scale_table[i].scale == val) {
580 ret = regmap_write(data->regmap,
581 BMC150_ACCEL_REG_PMU_RANGE,
582 data->chip_info->scale_table[i].reg_range);
583 if (ret < 0) {
584 dev_err(dev, "Error writing pmu_range\n");
585 return ret;
586 }
587
588 data->range = data->chip_info->scale_table[i].reg_range;
589 return 0;
590 }
591 }
592
593 return -EINVAL;
594 }
595
596 static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
597 {
598 struct device *dev = regmap_get_device(data->regmap);
599 int ret;
600 unsigned int value;
601
602 mutex_lock(&data->mutex);
603
604 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
605 if (ret < 0) {
606 dev_err(dev, "Error reading reg_temp\n");
607 mutex_unlock(&data->mutex);
608 return ret;
609 }
610 *val = sign_extend32(value, 7);
611
612 mutex_unlock(&data->mutex);
613
614 return IIO_VAL_INT;
615 }
616
617 static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
618 struct iio_chan_spec const *chan,
619 int *val)
620 {
621 struct device *dev = regmap_get_device(data->regmap);
622 int ret;
623 int axis = chan->scan_index;
624 __le16 raw_val;
625
626 mutex_lock(&data->mutex);
627 ret = bmc150_accel_set_power_state(data, true);
628 if (ret < 0) {
629 mutex_unlock(&data->mutex);
630 return ret;
631 }
632
633 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
634 &raw_val, sizeof(raw_val));
635 if (ret < 0) {
636 dev_err(dev, "Error reading axis %d\n", axis);
637 bmc150_accel_set_power_state(data, false);
638 mutex_unlock(&data->mutex);
639 return ret;
640 }
641 *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
642 chan->scan_type.realbits - 1);
643 ret = bmc150_accel_set_power_state(data, false);
644 mutex_unlock(&data->mutex);
645 if (ret < 0)
646 return ret;
647
648 return IIO_VAL_INT;
649 }
650
651 static int bmc150_accel_read_raw(struct iio_dev *indio_dev,
652 struct iio_chan_spec const *chan,
653 int *val, int *val2, long mask)
654 {
655 struct bmc150_accel_data *data = iio_priv(indio_dev);
656 int ret;
657
658 switch (mask) {
659 case IIO_CHAN_INFO_RAW:
660 switch (chan->type) {
661 case IIO_TEMP:
662 return bmc150_accel_get_temp(data, val);
663 case IIO_ACCEL:
664 if (iio_buffer_enabled(indio_dev))
665 return -EBUSY;
666 else
667 return bmc150_accel_get_axis(data, chan, val);
668 default:
669 return -EINVAL;
670 }
671 case IIO_CHAN_INFO_OFFSET:
672 if (chan->type == IIO_TEMP) {
673 *val = BMC150_ACCEL_TEMP_CENTER_VAL;
674 return IIO_VAL_INT;
675 } else {
676 return -EINVAL;
677 }
678 case IIO_CHAN_INFO_SCALE:
679 *val = 0;
680 switch (chan->type) {
681 case IIO_TEMP:
682 *val2 = 500000;
683 return IIO_VAL_INT_PLUS_MICRO;
684 case IIO_ACCEL:
685 {
686 int i;
687 const struct bmc150_scale_info *si;
688 int st_size = ARRAY_SIZE(data->chip_info->scale_table);
689
690 for (i = 0; i < st_size; ++i) {
691 si = &data->chip_info->scale_table[i];
692 if (si->reg_range == data->range) {
693 *val2 = si->scale;
694 return IIO_VAL_INT_PLUS_MICRO;
695 }
696 }
697 return -EINVAL;
698 }
699 default:
700 return -EINVAL;
701 }
702 case IIO_CHAN_INFO_SAMP_FREQ:
703 mutex_lock(&data->mutex);
704 ret = bmc150_accel_get_bw(data, val, val2);
705 mutex_unlock(&data->mutex);
706 return ret;
707 default:
708 return -EINVAL;
709 }
710 }
711
712 static int bmc150_accel_write_raw(struct iio_dev *indio_dev,
713 struct iio_chan_spec const *chan,
714 int val, int val2, long mask)
715 {
716 struct bmc150_accel_data *data = iio_priv(indio_dev);
717 int ret;
718
719 switch (mask) {
720 case IIO_CHAN_INFO_SAMP_FREQ:
721 mutex_lock(&data->mutex);
722 ret = bmc150_accel_set_bw(data, val, val2);
723 mutex_unlock(&data->mutex);
724 break;
725 case IIO_CHAN_INFO_SCALE:
726 if (val)
727 return -EINVAL;
728
729 mutex_lock(&data->mutex);
730 ret = bmc150_accel_set_scale(data, val2);
731 mutex_unlock(&data->mutex);
732 return ret;
733 default:
734 ret = -EINVAL;
735 }
736
737 return ret;
738 }
739
740 static int bmc150_accel_read_event(struct iio_dev *indio_dev,
741 const struct iio_chan_spec *chan,
742 enum iio_event_type type,
743 enum iio_event_direction dir,
744 enum iio_event_info info,
745 int *val, int *val2)
746 {
747 struct bmc150_accel_data *data = iio_priv(indio_dev);
748
749 *val2 = 0;
750 switch (info) {
751 case IIO_EV_INFO_VALUE:
752 *val = data->slope_thres;
753 break;
754 case IIO_EV_INFO_PERIOD:
755 *val = data->slope_dur;
756 break;
757 default:
758 return -EINVAL;
759 }
760
761 return IIO_VAL_INT;
762 }
763
764 static int bmc150_accel_write_event(struct iio_dev *indio_dev,
765 const struct iio_chan_spec *chan,
766 enum iio_event_type type,
767 enum iio_event_direction dir,
768 enum iio_event_info info,
769 int val, int val2)
770 {
771 struct bmc150_accel_data *data = iio_priv(indio_dev);
772
773 if (data->ev_enable_state)
774 return -EBUSY;
775
776 switch (info) {
777 case IIO_EV_INFO_VALUE:
778 data->slope_thres = val & BMC150_ACCEL_SLOPE_THRES_MASK;
779 break;
780 case IIO_EV_INFO_PERIOD:
781 data->slope_dur = val & BMC150_ACCEL_SLOPE_DUR_MASK;
782 break;
783 default:
784 return -EINVAL;
785 }
786
787 return 0;
788 }
789
790 static int bmc150_accel_read_event_config(struct iio_dev *indio_dev,
791 const struct iio_chan_spec *chan,
792 enum iio_event_type type,
793 enum iio_event_direction dir)
794 {
795 struct bmc150_accel_data *data = iio_priv(indio_dev);
796
797 return data->ev_enable_state;
798 }
799
800 static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
801 const struct iio_chan_spec *chan,
802 enum iio_event_type type,
803 enum iio_event_direction dir,
804 bool state)
805 {
806 struct bmc150_accel_data *data = iio_priv(indio_dev);
807 int ret;
808
809 if (state == data->ev_enable_state)
810 return 0;
811
812 mutex_lock(&data->mutex);
813
814 ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_ANY_MOTION,
815 state);
816 if (ret < 0) {
817 mutex_unlock(&data->mutex);
818 return ret;
819 }
820
821 data->ev_enable_state = state;
822 mutex_unlock(&data->mutex);
823
824 return 0;
825 }
826
827 static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
828 struct iio_trigger *trig)
829 {
830 struct bmc150_accel_data *data = iio_priv(indio_dev);
831 int i;
832
833 for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
834 if (data->triggers[i].indio_trig == trig)
835 return 0;
836 }
837
838 return -EINVAL;
839 }
840
841 static ssize_t bmc150_accel_get_fifo_watermark(struct device *dev,
842 struct device_attribute *attr,
843 char *buf)
844 {
845 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
846 struct bmc150_accel_data *data = iio_priv(indio_dev);
847 int wm;
848
849 mutex_lock(&data->mutex);
850 wm = data->watermark;
851 mutex_unlock(&data->mutex);
852
853 return sprintf(buf, "%d\n", wm);
854 }
855
856 static ssize_t bmc150_accel_get_fifo_state(struct device *dev,
857 struct device_attribute *attr,
858 char *buf)
859 {
860 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
861 struct bmc150_accel_data *data = iio_priv(indio_dev);
862 bool state;
863
864 mutex_lock(&data->mutex);
865 state = data->fifo_mode;
866 mutex_unlock(&data->mutex);
867
868 return sprintf(buf, "%d\n", state);
869 }
870
871 static const struct iio_mount_matrix *
872 bmc150_accel_get_mount_matrix(const struct iio_dev *indio_dev,
873 const struct iio_chan_spec *chan)
874 {
875 struct bmc150_accel_data *data = iio_priv(indio_dev);
876
877 return &data->orientation;
878 }
879
880 static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = {
881 IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bmc150_accel_get_mount_matrix),
882 { }
883 };
884
885 IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_min, "1");
886 IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_max,
887 __stringify(BMC150_ACCEL_FIFO_LENGTH));
888 static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO,
889 bmc150_accel_get_fifo_state, NULL, 0);
890 static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO,
891 bmc150_accel_get_fifo_watermark, NULL, 0);
892
893 static const struct iio_dev_attr *bmc150_accel_fifo_attributes[] = {
894 &iio_dev_attr_hwfifo_watermark_min,
895 &iio_dev_attr_hwfifo_watermark_max,
896 &iio_dev_attr_hwfifo_watermark,
897 &iio_dev_attr_hwfifo_enabled,
898 NULL,
899 };
900
901 static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
902 {
903 struct bmc150_accel_data *data = iio_priv(indio_dev);
904
905 if (val > BMC150_ACCEL_FIFO_LENGTH)
906 val = BMC150_ACCEL_FIFO_LENGTH;
907
908 mutex_lock(&data->mutex);
909 data->watermark = val;
910 mutex_unlock(&data->mutex);
911
912 return 0;
913 }
914
915 /*
916 * We must read at least one full frame in one burst, otherwise the rest of the
917 * frame data is discarded.
918 */
919 static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
920 char *buffer, int samples)
921 {
922 struct device *dev = regmap_get_device(data->regmap);
923 int sample_length = 3 * 2;
924 int ret;
925 int total_length = samples * sample_length;
926
927 ret = regmap_raw_read(data->regmap, BMC150_ACCEL_REG_FIFO_DATA,
928 buffer, total_length);
929 if (ret)
930 dev_err(dev,
931 "Error transferring data from fifo: %d\n", ret);
932
933 return ret;
934 }
935
936 static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
937 unsigned samples, bool irq)
938 {
939 struct bmc150_accel_data *data = iio_priv(indio_dev);
940 struct device *dev = regmap_get_device(data->regmap);
941 int ret, i;
942 u8 count;
943 u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
944 int64_t tstamp;
945 uint64_t sample_period;
946 unsigned int val;
947
948 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
949 if (ret < 0) {
950 dev_err(dev, "Error reading reg_fifo_status\n");
951 return ret;
952 }
953
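	/* The FIFO frame counter is held in the low 7 bits of FIFO_STATUS. */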
954 count = val & 0x7F;
955
956 if (!count)
957 return 0;
958
959 /*
960 * If we are called from the IRQ handler, we know the stored timestamp
961 * is fairly accurate for the last stored sample. Otherwise, if we are
962 * called as a result of a read operation from userspace and hence
963 * before the watermark interrupt was triggered, take a timestamp
964 * now. We can fall anywhere in between two samples so the error in this
965 * case is at most one sample period.
966 */
967 if (!irq) {
968 data->old_timestamp = data->timestamp;
969 data->timestamp = iio_get_time_ns(indio_dev);
970 }
971
972 /*
973 * Approximate timestamps for each of the sample based on the sampling
974 * frequency, timestamp for last sample and number of samples.
975 *
976 * Note that we can't use the current bandwidth settings to compute the
977 * sample period because the sample rate varies with the device
978 * (e.g. between 31.70 ms and 32.20 ms for a bandwidth of 15.63 Hz). That
979 * small variation adds up when we store a large number of samples and
980 * creates significant jitter between the last and first samples in
981 * different batches (e.g. 32ms vs 21ms).
982 *
983 * To avoid this issue we compute the actual sample period ourselves
984 * based on the timestamp delta between the last two flush operations.
985 */
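	/*
	 * Illustrative example (made-up numbers): if the two most recent
	 * flushes are 320 ms apart and 10 frames are stored, sample_period
	 * works out to 32 ms and the first frame of the batch is stamped
	 * 9 * 32 ms before data->timestamp.
	 */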
986 sample_period = (data->timestamp - data->old_timestamp);
987 do_div(sample_period, count);
988 tstamp = data->timestamp - (count - 1) * sample_period;
989
990 if (samples && count > samples)
991 count = samples;
992
993 ret = bmc150_accel_fifo_transfer(data, (u8 *)buffer, count);
994 if (ret)
995 return ret;
996
997 /*
998 * Ideally we want the IIO core to handle the demux when running in fifo
999 * mode but not when running in triggered buffer mode. Unfortunately
1000 * this does not seem to be possible, so stick with driver demux for
1001 * now.
1002 */
1003 for (i = 0; i < count; i++) {
1004 int j, bit;
1005
1006 j = 0;
1007 iio_for_each_active_channel(indio_dev, bit)
1008 memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
1009 sizeof(data->scan.channels[0]));
1010
1011 iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
1012 tstamp);
1013
1014 tstamp += sample_period;
1015 }
1016
1017 return count;
1018 }
1019
1020 static int bmc150_accel_fifo_flush(struct iio_dev *indio_dev, unsigned samples)
1021 {
1022 struct bmc150_accel_data *data = iio_priv(indio_dev);
1023 int ret;
1024
1025 mutex_lock(&data->mutex);
1026 ret = __bmc150_accel_fifo_flush(indio_dev, samples, false);
1027 mutex_unlock(&data->mutex);
1028
1029 return ret;
1030 }
1031
1032 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
1033 "15.620000 31.260000 62.50000 125 250 500 1000 2000");
1034
1035 static struct attribute *bmc150_accel_attributes[] = {
1036 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
1037 NULL,
1038 };
1039
1040 static const struct attribute_group bmc150_accel_attrs_group = {
1041 .attrs = bmc150_accel_attributes,
1042 };
1043
1044 static const struct iio_event_spec bmc150_accel_event = {
1045 .type = IIO_EV_TYPE_ROC,
1046 .dir = IIO_EV_DIR_EITHER,
1047 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
1048 BIT(IIO_EV_INFO_ENABLE) |
1049 BIT(IIO_EV_INFO_PERIOD)
1050 };
1051
1052 #define BMC150_ACCEL_CHANNEL(_axis, bits) { \
1053 .type = IIO_ACCEL, \
1054 .modified = 1, \
1055 .channel2 = IIO_MOD_##_axis, \
1056 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
1057 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
1058 BIT(IIO_CHAN_INFO_SAMP_FREQ), \
1059 .scan_index = AXIS_##_axis, \
1060 .scan_type = { \
1061 .sign = 's', \
1062 .realbits = (bits), \
1063 .storagebits = 16, \
1064 .shift = 16 - (bits), \
1065 .endianness = IIO_LE, \
1066 }, \
1067 .ext_info = bmc150_accel_ext_info, \
1068 .event_spec = &bmc150_accel_event, \
1069 .num_event_specs = 1 \
1070 }
1071
1072 #define BMC150_ACCEL_CHANNELS(bits) { \
1073 { \
1074 .type = IIO_TEMP, \
1075 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
1076 BIT(IIO_CHAN_INFO_SCALE) | \
1077 BIT(IIO_CHAN_INFO_OFFSET), \
1078 .scan_index = -1, \
1079 }, \
1080 BMC150_ACCEL_CHANNEL(X, bits), \
1081 BMC150_ACCEL_CHANNEL(Y, bits), \
1082 BMC150_ACCEL_CHANNEL(Z, bits), \
1083 IIO_CHAN_SOFT_TIMESTAMP(3), \
1084 }
1085
1086 static const struct iio_chan_spec bma222e_accel_channels[] =
1087 BMC150_ACCEL_CHANNELS(8);
1088 static const struct iio_chan_spec bma250e_accel_channels[] =
1089 BMC150_ACCEL_CHANNELS(10);
1090 static const struct iio_chan_spec bmc150_accel_channels[] =
1091 BMC150_ACCEL_CHANNELS(12);
1092 static const struct iio_chan_spec bma280_accel_channels[] =
1093 BMC150_ACCEL_CHANNELS(14);
1094
1095 /*
1096 * The range for the Bosch sensors is typically +-2g/4g/8g/16g, distributed
1097 * over the amount of bits (see above). The scale table can be calculated using
1098 * (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
1099 * e.g. for +-2g and 12 bits: (4 / 2^12) * 9.80665 m/s^2 = 0.0095768... m/s^2
1100 * Multiply 10^6 and round to get the values listed below.
1101 */
1102 static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
1103 {
1104 .name = "BMA222",
1105 .chip_id = 0x03,
1106 .channels = bma222e_accel_channels,
1107 .num_channels = ARRAY_SIZE(bma222e_accel_channels),
1108 .scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
1109 {306458, BMC150_ACCEL_DEF_RANGE_4G},
1110 {612916, BMC150_ACCEL_DEF_RANGE_8G},
1111 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
1112 },
1113 {
1114 .name = "BMA222E",
1115 .chip_id = 0xF8,
1116 .channels = bma222e_accel_channels,
1117 .num_channels = ARRAY_SIZE(bma222e_accel_channels),
1118 .scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
1119 {306458, BMC150_ACCEL_DEF_RANGE_4G},
1120 {612916, BMC150_ACCEL_DEF_RANGE_8G},
1121 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
1122 },
1123 {
1124 .name = "BMA250E",
1125 .chip_id = 0xF9,
1126 .channels = bma250e_accel_channels,
1127 .num_channels = ARRAY_SIZE(bma250e_accel_channels),
1128 .scale_table = { {38307, BMC150_ACCEL_DEF_RANGE_2G},
1129 {76614, BMC150_ACCEL_DEF_RANGE_4G},
1130 {153229, BMC150_ACCEL_DEF_RANGE_8G},
1131 {306458, BMC150_ACCEL_DEF_RANGE_16G} },
1132 },
1133 {
1134 .name = "BMA253/BMA254/BMA255/BMC150/BMC156/BMI055",
1135 .chip_id = 0xFA,
1136 .channels = bmc150_accel_channels,
1137 .num_channels = ARRAY_SIZE(bmc150_accel_channels),
1138 .scale_table = { {9577, BMC150_ACCEL_DEF_RANGE_2G},
1139 {19154, BMC150_ACCEL_DEF_RANGE_4G},
1140 {38307, BMC150_ACCEL_DEF_RANGE_8G},
1141 {76614, BMC150_ACCEL_DEF_RANGE_16G} },
1142 },
1143 {
1144 .name = "BMA280",
1145 .chip_id = 0xFB,
1146 .channels = bma280_accel_channels,
1147 .num_channels = ARRAY_SIZE(bma280_accel_channels),
1148 .scale_table = { {2394, BMC150_ACCEL_DEF_RANGE_2G},
1149 {4788, BMC150_ACCEL_DEF_RANGE_4G},
1150 {9577, BMC150_ACCEL_DEF_RANGE_8G},
1151 {19154, BMC150_ACCEL_DEF_RANGE_16G} },
1152 },
1153 };
1154
1155 static const struct iio_info bmc150_accel_info = {
1156 .attrs = &bmc150_accel_attrs_group,
1157 .read_raw = bmc150_accel_read_raw,
1158 .write_raw = bmc150_accel_write_raw,
1159 .read_event_value = bmc150_accel_read_event,
1160 .write_event_value = bmc150_accel_write_event,
1161 .write_event_config = bmc150_accel_write_event_config,
1162 .read_event_config = bmc150_accel_read_event_config,
1163 };
1164
1165 static const struct iio_info bmc150_accel_info_fifo = {
1166 .attrs = &bmc150_accel_attrs_group,
1167 .read_raw = bmc150_accel_read_raw,
1168 .write_raw = bmc150_accel_write_raw,
1169 .read_event_value = bmc150_accel_read_event,
1170 .write_event_value = bmc150_accel_write_event,
1171 .write_event_config = bmc150_accel_write_event_config,
1172 .read_event_config = bmc150_accel_read_event_config,
1173 .validate_trigger = bmc150_accel_validate_trigger,
1174 .hwfifo_set_watermark = bmc150_accel_set_watermark,
1175 .hwfifo_flush_to_buffer = bmc150_accel_fifo_flush,
1176 };
1177
1178 static const unsigned long bmc150_accel_scan_masks[] = {
1179 BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
1180 0};
1181
1182 static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
1183 {
1184 struct iio_poll_func *pf = p;
1185 struct iio_dev *indio_dev = pf->indio_dev;
1186 struct bmc150_accel_data *data = iio_priv(indio_dev);
1187 int ret;
1188
1189 mutex_lock(&data->mutex);
1190 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_REG_XOUT_L,
1191 data->buffer, AXIS_MAX * 2);
1192 mutex_unlock(&data->mutex);
1193 if (ret < 0)
1194 goto err_read;
1195
1196 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
1197 pf->timestamp);
1198 err_read:
1199 iio_trigger_notify_done(indio_dev->trig);
1200
1201 return IRQ_HANDLED;
1202 }
1203
1204 static void bmc150_accel_trig_reen(struct iio_trigger *trig)
1205 {
1206 struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
1207 struct bmc150_accel_data *data = t->data;
1208 struct device *dev = regmap_get_device(data->regmap);
1209 int ret;
1210
1211 /* new data interrupts don't need ack */
1212 if (t == &t->data->triggers[BMC150_ACCEL_TRIGGER_DATA_READY])
1213 return;
1214
1215 mutex_lock(&data->mutex);
1216 /* clear any latched interrupt */
1217 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1218 BMC150_ACCEL_INT_MODE_LATCH_INT |
1219 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1220 mutex_unlock(&data->mutex);
1221 if (ret < 0)
1222 dev_err(dev, "Error writing reg_int_rst_latch\n");
1223 }
1224
1225 static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
1226 bool state)
1227 {
1228 struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
1229 struct bmc150_accel_data *data = t->data;
1230 int ret;
1231
1232 mutex_lock(&data->mutex);
1233
1234 if (t->enabled == state) {
1235 mutex_unlock(&data->mutex);
1236 return 0;
1237 }
1238
1239 if (t->setup) {
1240 ret = t->setup(t, state);
1241 if (ret < 0) {
1242 mutex_unlock(&data->mutex);
1243 return ret;
1244 }
1245 }
1246
1247 ret = bmc150_accel_set_interrupt(data, t->intr, state);
1248 if (ret < 0) {
1249 mutex_unlock(&data->mutex);
1250 return ret;
1251 }
1252
1253 t->enabled = state;
1254
1255 mutex_unlock(&data->mutex);
1256
1257 return ret;
1258 }
1259
1260 static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
1261 .set_trigger_state = bmc150_accel_trigger_set_state,
1262 .reenable = bmc150_accel_trig_reen,
1263 };
1264
1265 static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
1266 {
1267 struct bmc150_accel_data *data = iio_priv(indio_dev);
1268 struct device *dev = regmap_get_device(data->regmap);
1269 int dir;
1270 int ret;
1271 unsigned int val;
1272
1273 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
1274 if (ret < 0) {
1275 dev_err(dev, "Error reading reg_int_status_2\n");
1276 return ret;
1277 }
1278
1279 if (val & BMC150_ACCEL_ANY_MOTION_BIT_SIGN)
1280 dir = IIO_EV_DIR_FALLING;
1281 else
1282 dir = IIO_EV_DIR_RISING;
1283
1284 if (val & BMC150_ACCEL_ANY_MOTION_BIT_X)
1285 iio_push_event(indio_dev,
1286 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1287 0,
1288 IIO_MOD_X,
1289 IIO_EV_TYPE_ROC,
1290 dir),
1291 data->timestamp);
1292
1293 if (val & BMC150_ACCEL_ANY_MOTION_BIT_Y)
1294 iio_push_event(indio_dev,
1295 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1296 0,
1297 IIO_MOD_Y,
1298 IIO_EV_TYPE_ROC,
1299 dir),
1300 data->timestamp);
1301
1302 if (val & BMC150_ACCEL_ANY_MOTION_BIT_Z)
1303 iio_push_event(indio_dev,
1304 IIO_MOD_EVENT_CODE(IIO_ACCEL,
1305 0,
1306 IIO_MOD_Z,
1307 IIO_EV_TYPE_ROC,
1308 dir),
1309 data->timestamp);
1310
1311 return ret;
1312 }
1313
1314 static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
1315 {
1316 struct iio_dev *indio_dev = private;
1317 struct bmc150_accel_data *data = iio_priv(indio_dev);
1318 struct device *dev = regmap_get_device(data->regmap);
1319 bool ack = false;
1320 int ret;
1321
1322 mutex_lock(&data->mutex);
1323
1324 if (data->fifo_mode) {
1325 ret = __bmc150_accel_fifo_flush(indio_dev,
1326 BMC150_ACCEL_FIFO_LENGTH, true);
1327 if (ret > 0)
1328 ack = true;
1329 }
1330
1331 if (data->ev_enable_state) {
1332 ret = bmc150_accel_handle_roc_event(indio_dev);
1333 if (ret > 0)
1334 ack = true;
1335 }
1336
1337 if (ack) {
1338 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1339 BMC150_ACCEL_INT_MODE_LATCH_INT |
1340 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1341 if (ret)
1342 dev_err(dev, "Error writing reg_int_rst_latch\n");
1343
1344 ret = IRQ_HANDLED;
1345 } else {
1346 ret = IRQ_NONE;
1347 }
1348
1349 mutex_unlock(&data->mutex);
1350
1351 return ret;
1352 }
1353
1354 static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
1355 {
1356 struct iio_dev *indio_dev = private;
1357 struct bmc150_accel_data *data = iio_priv(indio_dev);
1358 bool ack = false;
1359 int i;
1360
1361 data->old_timestamp = data->timestamp;
1362 data->timestamp = iio_get_time_ns(indio_dev);
1363
1364 for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
1365 if (data->triggers[i].enabled) {
1366 iio_trigger_poll(data->triggers[i].indio_trig);
1367 ack = true;
1368 break;
1369 }
1370 }
1371
1372 if (data->ev_enable_state || data->fifo_mode)
1373 return IRQ_WAKE_THREAD;
1374
1375 if (ack)
1376 return IRQ_HANDLED;
1377
1378 return IRQ_NONE;
1379 }
1380
1381 static const struct {
1382 int intr;
1383 const char *name;
1384 int (*setup)(struct bmc150_accel_trigger *t, bool state);
1385 } bmc150_accel_triggers[BMC150_ACCEL_TRIGGERS] = {
1386 {
1387 .intr = 0,
1388 .name = "%s-dev%d",
1389 },
1390 {
1391 .intr = 1,
1392 .name = "%s-any-motion-dev%d",
1393 .setup = bmc150_accel_any_motion_setup,
1394 },
1395 };
1396
1397 static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
1398 int from)
1399 {
1400 int i;
1401
1402 for (i = from; i >= 0; i--) {
1403 if (data->triggers[i].indio_trig) {
1404 iio_trigger_unregister(data->triggers[i].indio_trig);
1405 data->triggers[i].indio_trig = NULL;
1406 }
1407 }
1408 }
1409
1410 static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
1411 struct bmc150_accel_data *data)
1412 {
1413 struct device *dev = regmap_get_device(data->regmap);
1414 int i, ret;
1415
1416 for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
1417 struct bmc150_accel_trigger *t = &data->triggers[i];
1418
1419 t->indio_trig = devm_iio_trigger_alloc(dev,
1420 bmc150_accel_triggers[i].name,
1421 indio_dev->name,
1422 iio_device_id(indio_dev));
1423 if (!t->indio_trig) {
1424 ret = -ENOMEM;
1425 break;
1426 }
1427
1428 t->indio_trig->ops = &bmc150_accel_trigger_ops;
1429 t->intr = bmc150_accel_triggers[i].intr;
1430 t->data = data;
1431 t->setup = bmc150_accel_triggers[i].setup;
1432 iio_trigger_set_drvdata(t->indio_trig, t);
1433
1434 ret = iio_trigger_register(t->indio_trig);
1435 if (ret)
1436 break;
1437 }
1438
1439 if (ret)
1440 bmc150_accel_unregister_triggers(data, i - 1);
1441
1442 return ret;
1443 }
1444
1445 #define BMC150_ACCEL_FIFO_MODE_STREAM 0x80
1446 #define BMC150_ACCEL_FIFO_MODE_FIFO 0x40
1447 #define BMC150_ACCEL_FIFO_MODE_BYPASS 0x00
1448
1449 static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
1450 {
1451 struct device *dev = regmap_get_device(data->regmap);
1452 u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
1453 int ret;
1454
1455 ret = regmap_write(data->regmap, reg, data->fifo_mode);
1456 if (ret < 0) {
1457 dev_err(dev, "Error writing reg_fifo_config1\n");
1458 return ret;
1459 }
1460
1461 if (!data->fifo_mode)
1462 return 0;
1463
1464 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
1465 data->watermark);
1466 if (ret < 0)
1467 dev_err(dev, "Error writing reg_fifo_config0\n");
1468
1469 return ret;
1470 }
1471
1472 static int bmc150_accel_buffer_preenable(struct iio_dev *indio_dev)
1473 {
1474 struct bmc150_accel_data *data = iio_priv(indio_dev);
1475
1476 return bmc150_accel_set_power_state(data, true);
1477 }
1478
1479 static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
1480 {
1481 struct bmc150_accel_data *data = iio_priv(indio_dev);
1482 int ret = 0;
1483
1484 if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
1485 return 0;
1486
1487 mutex_lock(&data->mutex);
1488
1489 if (!data->watermark)
1490 goto out;
1491
1492 ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
1493 true);
1494 if (ret)
1495 goto out;
1496
1497 data->fifo_mode = BMC150_ACCEL_FIFO_MODE_FIFO;
1498
1499 ret = bmc150_accel_fifo_set_mode(data);
1500 if (ret) {
1501 data->fifo_mode = 0;
1502 bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
1503 false);
1504 }
1505
1506 out:
1507 mutex_unlock(&data->mutex);
1508
1509 return ret;
1510 }
1511
1512 static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
1513 {
1514 struct bmc150_accel_data *data = iio_priv(indio_dev);
1515
1516 if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
1517 return 0;
1518
1519 mutex_lock(&data->mutex);
1520
1521 if (!data->fifo_mode)
1522 goto out;
1523
1524 bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK, false);
1525 __bmc150_accel_fifo_flush(indio_dev, BMC150_ACCEL_FIFO_LENGTH, false);
1526 data->fifo_mode = 0;
1527 bmc150_accel_fifo_set_mode(data);
1528
1529 out:
1530 mutex_unlock(&data->mutex);
1531
1532 return 0;
1533 }
1534
1535 static int bmc150_accel_buffer_postdisable(struct iio_dev *indio_dev)
1536 {
1537 struct bmc150_accel_data *data = iio_priv(indio_dev);
1538
1539 return bmc150_accel_set_power_state(data, false);
1540 }
1541
1542 static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
1543 .preenable = bmc150_accel_buffer_preenable,
1544 .postenable = bmc150_accel_buffer_postenable,
1545 .predisable = bmc150_accel_buffer_predisable,
1546 .postdisable = bmc150_accel_buffer_postdisable,
1547 };
1548
1549 static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
1550 {
1551 struct device *dev = regmap_get_device(data->regmap);
1552 int ret, i;
1553 unsigned int val;
1554
1555 /*
1556 * Reset chip to get it in a known good state. A delay of 1.8ms after
1557 * reset is required according to the data sheets of supported chips.
1558 */
1559 regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
1560 BMC150_ACCEL_RESET_VAL);
1561 usleep_range(1800, 2500);
1562
1563 ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
1564 if (ret < 0) {
1565 dev_err(dev, "Error: Reading chip id\n");
1566 return ret;
1567 }
1568
1569 dev_dbg(dev, "Chip Id %x\n", val);
1570 for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
1571 if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
1572 data->chip_info = &bmc150_accel_chip_info_tbl[i];
1573 break;
1574 }
1575 }
1576
1577 if (!data->chip_info) {
1578 dev_err(dev, "Invalid chip %x\n", val);
1579 return -ENODEV;
1580 }
1581
1582 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1583 if (ret < 0)
1584 return ret;
1585
1586 /* Set Bandwidth */
1587 ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
1588 if (ret < 0)
1589 return ret;
1590
1591 /* Set Default Range */
1592 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
1593 BMC150_ACCEL_DEF_RANGE_4G);
1594 if (ret < 0) {
1595 dev_err(dev, "Error writing reg_pmu_range\n");
1596 return ret;
1597 }
1598
1599 data->range = BMC150_ACCEL_DEF_RANGE_4G;
1600
1601 /* Set default slope duration and thresholds */
1602 data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
1603 data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
1604 ret = bmc150_accel_update_slope(data);
1605 if (ret < 0)
1606 return ret;
1607
1608 /* Set default as latched interrupts */
1609 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1610 BMC150_ACCEL_INT_MODE_LATCH_INT |
1611 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1612 if (ret < 0) {
1613 dev_err(dev, "Error writing reg_int_rst_latch\n");
1614 return ret;
1615 }
1616
1617 return 0;
1618 }
1619
1620 int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
1621 enum bmc150_type type, const char *name,
1622 bool block_supported)
1623 {
1624 const struct iio_dev_attr **fifo_attrs;
1625 struct bmc150_accel_data *data;
1626 struct iio_dev *indio_dev;
1627 int ret;
1628
1629 indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
1630 if (!indio_dev)
1631 return -ENOMEM;
1632
1633 data = iio_priv(indio_dev);
1634 dev_set_drvdata(dev, indio_dev);
1635
1636 data->regmap = regmap;
1637 data->type = type;
1638
1639 if (!bmc150_apply_acpi_orientation(dev, &data->orientation)) {
1640 ret = iio_read_mount_matrix(dev, &data->orientation);
1641 if (ret)
1642 return ret;
1643 }
1644
1645 /*
1646 * VDD is the analog and digital domain voltage supply
1647 * VDDIO is the digital I/O voltage supply
1648 */
1649 data->regulators[0].supply = "vdd";
1650 data->regulators[1].supply = "vddio";
1651 ret = devm_regulator_bulk_get(dev,
1652 ARRAY_SIZE(data->regulators),
1653 data->regulators);
1654 if (ret)
1655 return dev_err_probe(dev, ret, "failed to get regulators\n");
1656
1657 ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
1658 data->regulators);
1659 if (ret) {
1660 dev_err(dev, "failed to enable regulators: %d\n", ret);
1661 return ret;
1662 }
1663 /*
1664 * The datasheets specify a power-on time of 2 ms or 3 ms; to be on the
1665 * safe side, use a 5 ms delay here.
1666 */
1667 msleep(5);
1668
1669 ret = bmc150_accel_chip_init(data);
1670 if (ret < 0)
1671 goto err_disable_regulators;
1672
1673 mutex_init(&data->mutex);
1674
1675 indio_dev->channels = data->chip_info->channels;
1676 indio_dev->num_channels = data->chip_info->num_channels;
1677 indio_dev->name = name ? name : data->chip_info->name;
1678 indio_dev->available_scan_masks = bmc150_accel_scan_masks;
1679 indio_dev->modes = INDIO_DIRECT_MODE;
1680 indio_dev->info = &bmc150_accel_info;
1681
1682 if (block_supported) {
1683 indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
1684 indio_dev->info = &bmc150_accel_info_fifo;
1685 fifo_attrs = bmc150_accel_fifo_attributes;
1686 } else {
1687 fifo_attrs = NULL;
1688 }
1689
1690 ret = iio_triggered_buffer_setup_ext(indio_dev,
1691 &iio_pollfunc_store_time,
1692 bmc150_accel_trigger_handler,
1693 IIO_BUFFER_DIRECTION_IN,
1694 &bmc150_accel_buffer_ops,
1695 fifo_attrs);
1696 if (ret < 0) {
1697 dev_err(dev, "Failed: iio triggered buffer setup\n");
1698 goto err_disable_regulators;
1699 }
1700
1701 if (irq > 0) {
1702 ret = devm_request_threaded_irq(dev, irq,
1703 bmc150_accel_irq_handler,
1704 bmc150_accel_irq_thread_handler,
1705 IRQF_TRIGGER_RISING,
1706 "bmc150_accel_event",
1707 indio_dev);
1708 if (ret)
1709 goto err_buffer_cleanup;
1710
1711 /*
1712 * Set latched mode interrupt. While certain interrupts are
1713 * non-latched regardless of this setting (e.g. new data), we
1714 * want to use latch mode when we can to prevent interrupt
1715 * flooding.
1716 */
1717 ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1718 BMC150_ACCEL_INT_MODE_LATCH_RESET);
1719 if (ret < 0) {
1720 dev_err(dev, "Error writing reg_int_rst_latch\n");
1721 goto err_buffer_cleanup;
1722 }
1723
1724 bmc150_accel_interrupts_setup(indio_dev, data, irq);
1725
1726 ret = bmc150_accel_triggers_setup(indio_dev, data);
1727 if (ret)
1728 goto err_buffer_cleanup;
1729 }
1730
1731 ret = pm_runtime_set_active(dev);
1732 if (ret)
1733 goto err_trigger_unregister;
1734
1735 pm_runtime_enable(dev);
1736 pm_runtime_set_autosuspend_delay(dev, BMC150_AUTO_SUSPEND_DELAY_MS);
1737 pm_runtime_use_autosuspend(dev);
1738
1739 ret = iio_device_register(indio_dev);
1740 if (ret < 0) {
1741 dev_err(dev, "Unable to register iio device\n");
1742 goto err_pm_cleanup;
1743 }
1744
1745 return 0;
1746
1747 err_pm_cleanup:
1748 pm_runtime_dont_use_autosuspend(dev);
1749 pm_runtime_disable(dev);
1750 err_trigger_unregister:
1751 bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
1752 err_buffer_cleanup:
1753 iio_triggered_buffer_cleanup(indio_dev);
1754 err_disable_regulators:
1755 regulator_bulk_disable(ARRAY_SIZE(data->regulators),
1756 data->regulators);
1757
1758 return ret;
1759 }
1760 EXPORT_SYMBOL_NS_GPL(bmc150_accel_core_probe, "IIO_BMC150");
1761
1762 void bmc150_accel_core_remove(struct device *dev)
1763 {
1764 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1765 struct bmc150_accel_data *data = iio_priv(indio_dev);
1766
1767 iio_device_unregister(indio_dev);
1768
1769 pm_runtime_disable(dev);
1770 pm_runtime_set_suspended(dev);
1771
1772 bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
1773
1774 iio_triggered_buffer_cleanup(indio_dev);
1775
1776 mutex_lock(&data->mutex);
1777 bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND, 0);
1778 mutex_unlock(&data->mutex);
1779
1780 regulator_bulk_disable(ARRAY_SIZE(data->regulators),
1781 data->regulators);
1782 }
1783 EXPORT_SYMBOL_NS_GPL(bmc150_accel_core_remove, "IIO_BMC150");
1784
1785 #ifdef CONFIG_PM_SLEEP
1786 static int bmc150_accel_suspend(struct device *dev)
1787 {
1788 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1789 struct bmc150_accel_data *data = iio_priv(indio_dev);
1790
1791 mutex_lock(&data->mutex);
1792 bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
1793 mutex_unlock(&data->mutex);
1794
1795 return 0;
1796 }
1797
1798 static int bmc150_accel_resume(struct device *dev)
1799 {
1800 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1801 struct bmc150_accel_data *data = iio_priv(indio_dev);
1802
1803 mutex_lock(&data->mutex);
1804 bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1805 bmc150_accel_fifo_set_mode(data);
1806 mutex_unlock(&data->mutex);
1807
1808 if (data->resume_callback)
1809 data->resume_callback(dev);
1810
1811 return 0;
1812 }
1813 #endif
1814
1815 #ifdef CONFIG_PM
1816 static int bmc150_accel_runtime_suspend(struct device *dev)
1817 {
1818 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1819 struct bmc150_accel_data *data = iio_priv(indio_dev);
1820 int ret;
1821
1822 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
1823 if (ret < 0)
1824 return -EAGAIN;
1825
1826 return 0;
1827 }
1828
1829 static int bmc150_accel_runtime_resume(struct device *dev)
1830 {
1831 struct iio_dev *indio_dev = dev_get_drvdata(dev);
1832 struct bmc150_accel_data *data = iio_priv(indio_dev);
1833 int ret;
1834 int sleep_val;
1835
1836 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1837 if (ret < 0)
1838 return ret;
1839 ret = bmc150_accel_fifo_set_mode(data);
1840 if (ret < 0)
1841 return ret;
1842
1843 sleep_val = bmc150_accel_get_startup_times(data);
1844 if (sleep_val < 20)
1845 usleep_range(sleep_val * 1000, 20000);
1846 else
1847 msleep_interruptible(sleep_val);
1848
1849 return 0;
1850 }
1851 #endif
1852
1853 const struct dev_pm_ops bmc150_accel_pm_ops = {
1854 SET_SYSTEM_SLEEP_PM_OPS(bmc150_accel_suspend, bmc150_accel_resume)
1855 SET_RUNTIME_PM_OPS(bmc150_accel_runtime_suspend,
1856 bmc150_accel_runtime_resume, NULL)
1857 };
1858 EXPORT_SYMBOL_NS_GPL(bmc150_accel_pm_ops, "IIO_BMC150");
1859
1860 MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
1861 MODULE_LICENSE("GPL v2");
1862 MODULE_DESCRIPTION("BMC150 accelerometer driver");
1863