// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDA(mhi_controller_ida);

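/*
 * The #undef/#define pairs below drive the X-macro lists from internal.h:
 * redefining the list hooks right before each table expands every list
 * entry into a designated initializer. For instance, a list entry of the
 * form mhi_ee(PBL, "PRIMARY BOOTLOADER") (shown here only to illustrate
 * the pattern) would expand to [MHI_EE_PBL] = "PRIMARY BOOTLOADER",
 * keeping each string table in sync with its enum by construction.
 */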
#undef mhi_ee
#undef mhi_ee_end

#define mhi_ee(a, b) [MHI_EE_##a] = b,
#define mhi_ee_end(a, b) [MHI_EE_##a] = b,

const char * const mhi_ee_str[MHI_EE_MAX] = {
	MHI_EE_LIST
};

#undef dev_st_trans
#undef dev_st_trans_end

#define dev_st_trans(a, b) [DEV_ST_TRANSITION_##a] = b,
#define dev_st_trans_end(a, b) [DEV_ST_TRANSITION_##a] = b,

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	DEV_ST_TRANSITION_LIST
};

#undef ch_state_type
#undef ch_state_type_end

#define ch_state_type(a, b) [MHI_CH_STATE_TYPE_##a] = b,
#define ch_state_type_end(a, b) [MHI_CH_STATE_TYPE_##a] = b,

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	MHI_CH_STATE_TYPE_LIST
};

#undef mhi_pm_state
#undef mhi_pm_state_end

#define mhi_pm_state(a, b) [MHI_PM_STATE_##a] = b,
#define mhi_pm_state_end(a, b) [MHI_PM_STATE_##a] = b,

static const char * const mhi_pm_state_str[] = {
	MHI_PM_STATE_LIST
};

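/*
 * MHI PM states are bitmask values; the most significant set bit selects
 * the slot in the table above, e.g. a state value of BIT(2) maps to
 * mhi_pm_state_str[2].
 */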
const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			  mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	u32 hash_segment[MHI_MAX_OEM_PK_HASH_SEGMENTS];
	int i, cnt = 0, ret;

	for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), &hash_segment[i]);
		if (ret) {
			dev_err(dev, "Could not capture OEM PK HASH\n");
			return ret;
		}
	}

	for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", i, hash_segment[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);

static ssize_t trigger_edl_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (!val)
		return -EINVAL;

	ret = mhi_cntrl->edl_trigger(mhi_cntrl);
	if (ret)
		return ret;

	return count;
}
static DEVICE_ATTR_WO(trigger_edl);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* The MHI protocol requires the transfer ring to be aligned to its length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
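	/*
	 * Over-allocate by len - 1 bytes so that an address aligned to len
	 * is guaranteed to fall inside the buffer; the mask arithmetic
	 * below assumes len is a power of two. For example, with
	 * len = 0x1000 and dma_handle = 0x12340800, iommu_base becomes
	 * 0x12341000, i.e. 0x800 bytes into the allocation.
	 */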
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

static void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* If the controller driver has set irq_flags, use them */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;
	/*
	 * IRQs should only be enabled during mhi_async_power_up(), so
	 * disable them explicitly here. Since IRQF_SHARED is the default
	 * request flag, IRQ_NOAUTOEN cannot be used to achieve this.
	 */
	disable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}

		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	return 0;

error_request:
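	/*
	 * i and mhi_event point one past the last ring that was processed,
	 * so step back and free only the IRQs already requested, then the
	 * BHI IRQ itself.
	 */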
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

static void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

static int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc_obj(*mhi_ctxt);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
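	/*
	 * Prime the event ring unwind below: start one past the last ring
	 * and fall through into error_alloc_er so every ring is freed.
	 */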
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICTRLBASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{0, 0}
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
	if (ret)
		return ret;

	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
		return -ERANGE;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
		return -ERANGE;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, num);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:
	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so vcalloc is used here to avoid possible allocation failures
	 */
	mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring needs to be bigger than
		 * the transfer ring because the device multiplexes internal
		 * logical channels onto it, allowing the host to queue more
		 * buffers than the transfer ring can hold. RSC channels, for
		 * example, need a local ring larger than the transfer ring.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. So, if it is not defined, fall back to the
		 * channel direction.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * Bi-directional and direction-less channels must be
		 * offload channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid doorbell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms;
	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
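
/*
 * A minimal controller config sketch, assuming a single bidirectional
 * pair of "LOOPBACK" channels sharing event ring 0 (names and sizes here
 * are illustrative, not taken from any real platform):
 *
 *	static const struct mhi_channel_config chan_cfg[] = {
 *		{
 *			.num = 0,
 *			.name = "LOOPBACK",
 *			.num_elements = 32,
 *			.event_ring = 0,
 *			.dir = DMA_TO_DEVICE,
 *			.ee_mask = BIT(MHI_EE_AMSS),
 *			.doorbell = MHI_DB_BRST_DISABLE,
 *		},
 *		{
 *			.num = 1,
 *			.name = "LOOPBACK",
 *			.num_elements = 32,
 *			.event_ring = 0,
 *			.dir = DMA_FROM_DEVICE,
 *			.ee_mask = BIT(MHI_EE_AMSS),
 *			.doorbell = MHI_DB_BRST_DISABLE,
 *		},
 *	};
 *
 *	static const struct mhi_event_config event_cfg[] = {
 *		{
 *			.num_elements = 64,
 *			.irq = 1,
 *			.channel = U32_MAX,	// not dedicated to a channel
 *			.mode = MHI_DB_BRST_DISABLE,
 *			.data_type = MHI_ER_DATA,
 *		},
 *	};
 *
 *	static const struct mhi_controller_config cfg = {
 *		.max_channels = 128,
 *		.timeout_ms = 2000,
 *		.num_channels = ARRAY_SIZE(chan_cfg),
 *		.ch_cfg = chan_cfg,
 *		.num_events = ARRAY_SIZE(event_cfg),
 *		.event_cfg = event_cfg,
 *	};
 */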

int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* Used for setting the BEI field of a TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto err_ida_free;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_setup_irq;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	if (mhi_cntrl->edl_trigger) {
		ret = sysfs_create_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr);
		if (ret)
			goto err_release_dev;
	}

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
error_setup_irq:
	mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
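
/*
 * A minimal registration sketch from a hypothetical transport glue driver
 * (the cfg structure and the my_* callbacks are assumptions, shown only
 * to illustrate the expected call order and the mandatory fields checked
 * at the top of mhi_register_controller()):
 *
 *	mhi_cntrl = mhi_alloc_controller();
 *	if (!mhi_cntrl)
 *		return -ENOMEM;
 *
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = mmio_base;
 *	mhi_cntrl->reg_len = mmio_len;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->nr_irqs = nr_irqs;
 *	mhi_cntrl->read_reg = my_read_reg;
 *	mhi_cntrl->write_reg = my_write_reg;
 *	mhi_cntrl->runtime_get = my_runtime_get;
 *	mhi_cntrl->runtime_put = my_runtime_put;
 *	mhi_cntrl->status_cb = my_status_cb;
 *
 *	ret = mhi_register_controller(mhi_cntrl, &cfg);
 *	if (ret)
 *		mhi_free_controller(mhi_cntrl);
 */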

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_deinit_free_irq(mhi_cntrl);
	mhi_destroy_debugfs(mhi_cntrl);

	if (mhi_cntrl->edl_trigger)
		sysfs_remove_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc_obj(*mhi_cntrl);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -ERANGE;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size || mhi_cntrl->seg_len) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -ERANGE;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);
		/* Allocate RDDM table for debugging purposes, if specified */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
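
/*
 * A sketch of the expected bring-up/tear-down order for a controller
 * driver, assuming the exported power management helpers from pm.c
 * (error handling elided for brevity):
 *
 *	ret = mhi_register_controller(mhi_cntrl, &cfg);
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	ret = mhi_sync_power_up(mhi_cntrl);
 *	...
 *	mhi_power_down(mhi_cntrl, true);
 *	mhi_unprepare_after_power_down(mhi_cntrl);
 *	mhi_unregister_controller(mhi_cntrl);
 */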

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with them is NULL. This scenario will happen during
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc_obj(*mhi_dev);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* For MHI client devices, the parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* For the MHI controller device, the parent is the bus device (e.g. a PCI device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications, then a
		 * status_cb must be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* Non-offload channels must provide an xfer_cb */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications, then a
		 * status_cb must be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* Non-offload channels must provide an xfer_cb */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by the client, then a
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static void mhi_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

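	/* Drop any device wake references the client driver left behind */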
	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);
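
/*
 * A minimal client driver sketch (the channel name, callbacks, and driver
 * name are illustrative assumptions). Registration is normally done via
 * the module_mhi_driver() helper from <linux/mhi.h>:
 *
 *	static const struct mhi_device_id my_mhi_ids[] = {
 *		{ .chan = "LOOPBACK" },
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(mhi, my_mhi_ids);
 *
 *	static struct mhi_driver my_mhi_driver = {
 *		.id_table = my_mhi_ids,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.ul_xfer_cb = my_ul_xfer_cb,
 *		.dl_xfer_cb = my_dl_xfer_cb,
 *		.driver = {
 *			.name = "my_mhi_client",
 *		},
 *	};
 *	module_mhi_driver(my_mhi_driver);
 */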

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}
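
/*
 * MHI_DEVICE_MODALIAS_FMT is "mhi:%s", so a channel device named
 * "LOOPBACK" reports MODALIAS=mhi:LOOPBACK, which userspace matches
 * against the driver's MODULE_DEVICE_TABLE(mhi, ...) aliases.
 */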

static int mhi_match(struct device *dev, const struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	const struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type, then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

const struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.probe = mhi_probe,
	.remove = mhi_remove,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Modem Host Interface");