// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>

#include "ipu6.h"
#include "ipu6-bus.h"
#include "ipu6-buttress.h"
#include "ipu6-platform-buttress-regs.h"

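/*
 * Bootloader status register offset in the PSYS register space and the
 * magic value it reports once the CSE has started the bootloader.
 */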
#define BOOTLOADER_STATUS_OFFSET 0x15c

#define BOOTLOADER_MAGIC_KEY 0xb00710ad

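/*
 * Shorthand for the CSE IPC reset handshake phase bits used by
 * ipu6_buttress_ipc_reset() below.
 */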
#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE

#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10

#define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC)

#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC)
#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC)
#define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC)

#define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC
#define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC
#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC)
#define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC)

#define BUTTRESS_IPC_RESET_RETRY 2000
#define BUTTRESS_CSE_IPC_RESET_RETRY 4
#define BUTTRESS_IPC_CMD_SEND_RETRY 1

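/* Bail out of the ISR loop after this many consecutive iterations. */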
#define BUTTRESS_MAX_CONSECUTIVE_IRQS 100

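/*
 * ISR status bits of the IS and PS devices, in the same order as the
 * adev[] arrays built in the interrupt handlers below.
 */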
static const u32 ipu6_adev_irq_mask[2] = {
        BUTTRESS_ISR_IS_IRQ,
        BUTTRESS_ISR_PS_IRQ
};

int ipu6_buttress_ipc_reset(struct ipu6_device *isp,
                            struct ipu6_buttress_ipc *ipc)
{
        unsigned int retries = BUTTRESS_IPC_RESET_RETRY;
        struct ipu6_buttress *b = &isp->buttress;
        u32 val = 0, csr_in_clr;

        if (!isp->secure_mode) {
                dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode\n");
                return 0;
        }

        mutex_lock(&b->ipc_mutex);

        /* Clear-by-1 CSR (all bits), corresponding internal states. */
        val = readl(isp->base + ipc->csr_in);
        writel(val, isp->base + ipc->csr_in);

        /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
        writel(ENTRY, isp->base + ipc->csr_out);
        /*
         * Clear-by-1 all CSR bits EXCEPT following
         * bits:
         * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
         * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
         * C. Possibly custom bits, depending on
         * their role.
         */
        csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
                BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
                BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;

        do {
                usleep_range(400, 500);
                val = readl(isp->base + ipc->csr_in);
                switch (val) {
                case ENTRY | EXIT:
                case ENTRY | EXIT | QUERY:
                        /*
                         * 1) Clear-by-1 CSR bits
                         * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
                         * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
                         * 2) Set peer CSR bit
                         * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
                         */
                        writel(ENTRY | EXIT, isp->base + ipc->csr_in);
                        writel(QUERY, isp->base + ipc->csr_out);
                        break;
                case ENTRY:
                case ENTRY | QUERY:
                        /*
                         * 1) Clear-by-1 CSR bits
                         * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
                         * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
                         * 2) Set peer CSR bit
                         * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
                         */
                        writel(ENTRY | QUERY, isp->base + ipc->csr_in);
                        writel(ENTRY, isp->base + ipc->csr_out);
                        break;
                case EXIT:
                case EXIT | QUERY:
                        /*
                         * Clear-by-1 CSR bit
                         * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
                         * 1) Clear incoming doorbell.
                         * 2) Clear-by-1 all CSR bits EXCEPT following
                         * bits:
                         * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
                         * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
                         * C. Possibly custom bits, depending on
                         * their role.
                         * 3) Set peer CSR bit
                         * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
                         */
                        writel(EXIT, isp->base + ipc->csr_in);
                        writel(0, isp->base + ipc->db0_in);
                        writel(csr_in_clr, isp->base + ipc->csr_in);
                        writel(EXIT, isp->base + ipc->csr_out);

                        /*
                         * Read csr_in again to check whether RST_PHASE2 is
                         * done. If csr_in still has QUERY set, it must be
                         * handled again.
                         */
                        usleep_range(200, 300);
                        val = readl(isp->base + ipc->csr_in);
                        if (val & QUERY) {
                                dev_dbg(&isp->pdev->dev,
                                        "RST_PHASE2 retry csr_in = %x\n", val);
                                break;
                        }
                        mutex_unlock(&b->ipc_mutex);
                        return 0;
                case QUERY:
                        /*
                         * 1) Clear-by-1 CSR bit
                         * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
                         * 2) Set peer CSR bit
                         * IPC_PEER_COMP_ACTIONS_RST_PHASE1
                         */
                        writel(QUERY, isp->base + ipc->csr_in);
                        writel(ENTRY, isp->base + ipc->csr_out);
                        break;
                default:
                        dev_dbg_ratelimited(&isp->pdev->dev,
                                            "Unexpected CSR 0x%x\n", val);
                        break;
                }
        } while (retries--);

        mutex_unlock(&b->ipc_mutex);
        dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n");

        return -ETIMEDOUT;
}

static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp,
                                             struct ipu6_buttress_ipc *ipc)
{
        writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
               isp->base + ipc->csr_out);
}

static int
ipu6_buttress_ipc_validity_open(struct ipu6_device *isp,
                                struct ipu6_buttress_ipc *ipc)
{
        unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
        void __iomem *addr;
        int ret;
        u32 val;

        writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
               isp->base + ipc->csr_out);

        addr = isp->base + ipc->csr_in;
        ret = readl_poll_timeout(addr, val, val & mask, 200,
                                 BUTTRESS_IPC_VALIDITY_TIMEOUT_US);
        if (ret) {
                dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val);
                ipu6_buttress_ipc_validity_close(isp, ipc);
        }

        return ret;
}

static void ipu6_buttress_ipc_recv(struct ipu6_device *isp,
                                   struct ipu6_buttress_ipc *ipc, u32 *ipc_msg)
{
        if (ipc_msg)
                *ipc_msg = readl(isp->base + ipc->data0_in);
        writel(0, isp->base + ipc->db0_in);
}

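/*
 * Send a sequence of IPC commands: for each message, write the command to
 * data0_out, ring db0_out with the busy bit and command size, wait for the
 * EXEC_DONE completion and, when a response is required, wait for the
 * incoming doorbell and compare the received data with the expected
 * response. A send timeout is retried once before giving up.
 */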
static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp,
                                       enum ipu6_buttress_ipc_domain ipc_domain,
                                       struct ipu6_ipc_buttress_bulk_msg *msgs,
                                       u32 size)
{
        unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
        unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY;
        struct ipu6_buttress *b = &isp->buttress;
        struct ipu6_buttress_ipc *ipc;
        u32 val;
        int ret;
        int tout;

        ipc = ipc_domain == IPU6_BUTTRESS_IPC_CSE ? &b->cse : &b->ish;

        mutex_lock(&b->ipc_mutex);

        ret = ipu6_buttress_ipc_validity_open(isp, ipc);
        if (ret) {
                dev_err(&isp->pdev->dev, "IPC validity open failed\n");
                goto out;
        }

        tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS);
        rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS);

        for (i = 0; i < size; i++) {
                reinit_completion(&ipc->send_complete);
                if (msgs[i].require_resp)
                        reinit_completion(&ipc->recv_complete);

                dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n",
                        msgs[i].cmd);
                writel(msgs[i].cmd, isp->base + ipc->data0_out);
                val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size;
                writel(val, isp->base + ipc->db0_out);

                tout = wait_for_completion_timeout(&ipc->send_complete,
                                                   tx_timeout_jiffies);
                if (!tout) {
                        dev_err(&isp->pdev->dev, "send IPC response timeout\n");
                        if (!retry--) {
                                ret = -ETIMEDOUT;
                                goto out;
                        }

                        /* Try again if CSE is not responding on first try */
                        writel(0, isp->base + ipc->db0_out);
                        i--;
                        continue;
                }

                retry = BUTTRESS_IPC_CMD_SEND_RETRY;

                if (!msgs[i].require_resp)
                        continue;

                tout = wait_for_completion_timeout(&ipc->recv_complete,
                                                   rx_timeout_jiffies);
                if (!tout) {
                        dev_err(&isp->pdev->dev, "recv IPC response timeout\n");
                        ret = -ETIMEDOUT;
                        goto out;
                }

                if (ipc->nack_mask &&
                    (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
                        dev_err(&isp->pdev->dev,
                                "IPC NACK for cmd 0x%x\n", msgs[i].cmd);
                        ret = -EIO;
                        goto out;
                }

                if (ipc->recv_data != msgs[i].expected_resp) {
                        dev_err(&isp->pdev->dev,
291 "expected resp: 0x%x, IPC response: 0x%x ",
                                msgs[i].expected_resp, ipc->recv_data);
                        ret = -EIO;
                        goto out;
                }
        }

        dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n");

out:
        ipu6_buttress_ipc_validity_close(isp, ipc);
        mutex_unlock(&b->ipc_mutex);
        return ret;
}

static int
ipu6_buttress_ipc_send(struct ipu6_device *isp,
                       enum ipu6_buttress_ipc_domain ipc_domain,
                       u32 ipc_msg, u32 size, bool require_resp,
                       u32 expected_resp)
{
        struct ipu6_ipc_buttress_bulk_msg msg = {
                .cmd = ipc_msg,
                .cmd_size = size,
                .require_resp = require_resp,
                .expected_resp = expected_resp,
        };

        return ipu6_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1);
}

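/*
 * Run the auxiliary device's hard ISR, if any. Returns IRQ_NONE when no
 * driver is bound, and only returns IRQ_WAKE_THREAD when the driver also
 * provides a threaded handler.
 */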
static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev)
{
        irqreturn_t ret = IRQ_WAKE_THREAD;

        if (!adev || !adev->auxdrv || !adev->auxdrv_data)
                return IRQ_NONE;

        if (adev->auxdrv_data->isr)
                ret = adev->auxdrv_data->isr(adev);

        if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded)
                ret = IRQ_NONE;

        return ret;
}

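/*
 * Top half of the buttress interrupt handler: acknowledge and handle all
 * pending status bits, dispatch IS/PS interrupts to the auxiliary drivers,
 * complete the CSE/ISH IPC waiters and loop until no new status bits are
 * set. IRQ sources whose threaded handler has been woken are masked until
 * the thread re-enables them.
 */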
irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr)
{
        struct ipu6_device *isp = isp_ptr;
        struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
        struct ipu6_buttress *b = &isp->buttress;
        u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS;
        irqreturn_t ret = IRQ_NONE;
        u32 disable_irqs = 0;
        u32 irq_status;
        u32 i, count = 0;

        pm_runtime_get_noresume(&isp->pdev->dev);

        irq_status = readl(isp->base + reg_irq_sts);
        if (!irq_status) {
                pm_runtime_put_noidle(&isp->pdev->dev);
                return IRQ_NONE;
        }

        do {
                writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);

                for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) {
                        irqreturn_t r = ipu6_buttress_call_isr(adev[i]);

                        if (!(irq_status & ipu6_adev_irq_mask[i]))
                                continue;

                        if (r == IRQ_WAKE_THREAD) {
                                ret = IRQ_WAKE_THREAD;
                                disable_irqs |= ipu6_adev_irq_mask[i];
                        } else if (ret == IRQ_NONE && r == IRQ_HANDLED) {
                                ret = IRQ_HANDLED;
                        }
                }

                if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE)
                        ret = IRQ_HANDLED;

                if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
                        dev_dbg(&isp->pdev->dev,
                                "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
                        ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
                        complete(&b->cse.recv_complete);
                }

                if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) {
                        dev_dbg(&isp->pdev->dev,
                                "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n");
                        ipu6_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data);
                        complete(&b->ish.recv_complete);
                }

                if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
                        dev_dbg(&isp->pdev->dev,
                                "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
                        complete(&b->cse.send_complete);
                }

                if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) {
                        dev_dbg(&isp->pdev->dev,
                                "BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH\n");
                        complete(&b->ish.send_complete);
                }

                if (irq_status & BUTTRESS_ISR_SAI_VIOLATION &&
                    ipu6_buttress_get_secure_mode(isp))
                        dev_err(&isp->pdev->dev,
                                "BUTTRESS_ISR_SAI_VIOLATION\n");

                if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR |
                                  BUTTRESS_ISR_PS_FATAL_MEM_ERR))
                        dev_err(&isp->pdev->dev,
                                "BUTTRESS_ISR_FATAL_MEM_ERR\n");

                if (irq_status & BUTTRESS_ISR_UFI_ERROR)
                        dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n");

                if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) {
                        dev_err(&isp->pdev->dev, "too many consecutive IRQs\n");
                        ret = IRQ_NONE;
                        break;
                }

                irq_status = readl(isp->base + reg_irq_sts);
        } while (irq_status);

        if (disable_irqs)
                writel(BUTTRESS_IRQS & ~disable_irqs,
                       isp->base + BUTTRESS_REG_ISR_ENABLE);

        pm_runtime_put(&isp->pdev->dev);

        return ret;
}

irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr)
{
        struct ipu6_device *isp = isp_ptr;
        struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
        const struct ipu6_auxdrv_data *drv_data = NULL;
        irqreturn_t ret = IRQ_NONE;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) {
                drv_data = adev[i]->auxdrv_data;
                if (!drv_data)
                        continue;

                if (drv_data->wake_isr_thread &&
                    drv_data->isr_threaded(adev[i]) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }

        writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);

        return ret;
}

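/*
 * Power a sub-domain up or down: program the frequency control register
 * with the requested ratio and QoS floor (or zero it for power down) and
 * poll BUTTRESS_REG_PWR_STATE until the expected power status is reported.
 */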
int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
                        bool on)
{
        struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp;
        u32 pwr_sts, val;
        int ret;

        if (!ctrl)
                return 0;

        mutex_lock(&isp->buttress.power_mutex);

        if (!on) {
                val = 0;
                pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
        } else {
                val = BUTTRESS_FREQ_CTL_START |
                        FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK,
                                   ctrl->ratio) |
                        FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK,
                                   ctrl->qos_floor) |
                        BUTTRESS_FREQ_CTL_ICCMAX_LEVEL;

                pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
        }

        writel(val, isp->base + ctrl->freq_ctl);

        ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE,
                                 val, (val & ctrl->pwr_sts_mask) == pwr_sts,
                                 100, BUTTRESS_POWER_TIMEOUT_US);
        if (ret)
                dev_err(&isp->pdev->dev,
                        "Change power status timeout with 0x%x\n", val);

        ctrl->started = !ret && on;

        mutex_unlock(&isp->buttress.power_mutex);

        return ret;
}

bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp)
{
        u32 val;

        val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);

        return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
}

bool ipu6_buttress_auth_done(struct ipu6_device *isp)
{
        u32 val;

        if (!isp->secure_mode)
                return true;

        val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
        val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val);

        return val == BUTTRESS_SECURITY_CTL_AUTH_DONE;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, INTEL_IPU6);

int ipu6_buttress_reset_authentication(struct ipu6_device *isp)
{
        int ret;
        u32 val;

        if (!isp->secure_mode) {
                dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
                return 0;
        }

        writel(BUTTRESS_FW_RESET_CTL_START, isp->base +
               BUTTRESS_REG_FW_RESET_CTL);

        ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val,
                                 val & BUTTRESS_FW_RESET_CTL_DONE, 500,
                                 BUTTRESS_CSE_FWRESET_TIMEOUT_US);
        if (ret) {
                dev_err(&isp->pdev->dev,
540 "Time out while resetting authentication state\n");
                return ret;
        }

        dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n");
        writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL);
        /* leave some time for HW restore */
        usleep_range(800, 1000);

        return 0;
}

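/*
 * Build a scatter-gather table over the firmware image (which may live in
 * vmalloc or linear kernel memory) and map it for DMA to the device.
 */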
int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
                               const struct firmware *fw, struct sg_table *sgt)
{
        bool is_vmalloc = is_vmalloc_addr(fw->data);
        struct page **pages;
        const void *addr;
        unsigned long n_pages;
        unsigned int i;
        int ret;

        if (!is_vmalloc && !virt_addr_valid(fw->data))
                return -EDOM;

        n_pages = PHYS_PFN(PAGE_ALIGN(fw->size));

        pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        addr = fw->data;
        for (i = 0; i < n_pages; i++) {
                struct page *p = is_vmalloc ?
                        vmalloc_to_page(addr) : virt_to_page(addr);

                if (!p) {
                        ret = -ENOMEM;
                        goto out;
                }
                pages[i] = p;
                addr += PAGE_SIZE;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size,
                                        GFP_KERNEL);
        if (ret) {
                ret = -ENOMEM;
                goto out;
        }

        ret = dma_map_sgtable(&sys->auxdev.dev, sgt, DMA_TO_DEVICE, 0);
        if (ret < 0) {
                ret = -ENOMEM;
                sg_free_table(sgt);
                goto out;
        }

        dma_sync_sgtable_for_device(&sys->auxdev.dev, sgt, DMA_TO_DEVICE);

out:
        kfree(pages);

        return ret;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6);

void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
                                  struct sg_table *sgt)
{
        dma_unmap_sgtable(&sys->auxdev.dev, sgt, DMA_TO_DEVICE, 0);
        sg_free_table(sgt);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6);

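/*
 * Authenticate the firmware with the CSE: program the firmware source
 * address, send BOOT_LOAD and poll for FW setup done, wait for the
 * bootloader magic value, then send AUTHENTICATE_RUN and poll for
 * authentication done.
 */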
int ipu6_buttress_authenticate(struct ipu6_device *isp)
{
        struct ipu6_buttress *b = &isp->buttress;
        struct ipu6_psys_pdata *psys_pdata;
        u32 data, mask, done, fail;
        int ret;

        if (!isp->secure_mode) {
                dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
                return 0;
        }

        psys_pdata = isp->psys->pdata;

        mutex_lock(&b->auth_mutex);

        if (ipu6_buttress_auth_done(isp)) {
                ret = 0;
                goto out_unlock;
        }

        /*
         * Write the address of the FIT table to the FW_SOURCE registers.
         * Use the FW address directly, i.e. the FIT table is not used yet.
         */
        data = lower_32_bits(isp->psys->pkg_dir_dma_addr);
        writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO);

        data = upper_32_bits(isp->psys->pkg_dir_dma_addr);
        writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI);

        /*
         * Write boot_load into IU2CSEDATA0.
         * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
         * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY.
         */
        dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n");

        ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE,
                                     BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
                                     1, true,
                                     BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE);
        if (ret) {
                dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
                goto out_unlock;
        }

        mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
        done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE;
        fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED;
        ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
                                 ((data & mask) == done ||
                                  (data & mask) == fail), 500,
                                 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
        if (ret) {
                dev_err(&isp->pdev->dev, "CSE boot_load timeout\n");
                goto out_unlock;
        }

        if ((data & mask) == fail) {
                dev_err(&isp->pdev->dev, "CSE auth failed\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET,
                                 data, data == BOOTLOADER_MAGIC_KEY, 500,
                                 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
        if (ret) {
                dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n",
                        data);
                goto out_unlock;
        }

        /*
         * Write authenticate_run into IU2CSEDATA0.
         * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
         * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY.
         */
        dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n");
        ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE,
                                     BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN,
                                     1, true,
                                     BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE);
        if (ret) {
                dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n");
                goto out_unlock;
        }

        done = BUTTRESS_SECURITY_CTL_AUTH_DONE;
        ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
                                 ((data & mask) == done ||
                                  (data & mask) == fail), 500,
                                 BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US);
        if (ret) {
                dev_err(&isp->pdev->dev, "CSE authenticate timeout\n");
                goto out_unlock;
        }

        if ((data & mask) == fail) {
                dev_err(&isp->pdev->dev, "CSE authenticate failed\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        dev_info(&isp->pdev->dev, "CSE authenticate_run done\n");

out_unlock:
        mutex_unlock(&b->auth_mutex);

        return ret;
}

static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp)
{
        u32 val, mask, done;
        int ret;

        mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK;

        writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC,
               isp->base + BUTTRESS_REG_FABRIC_CMD);

        val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
        val = FIELD_GET(mask, val);
        if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) {
                dev_err(&isp->pdev->dev, "Start tsc sync failed\n");
                return -EINVAL;
        }

        done = BUTTRESS_PWR_STATE_HH_STATE_DONE;
        ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val,
                                 FIELD_GET(mask, val) == done, 500,
                                 BUTTRESS_TSC_SYNC_TIMEOUT_US);
        if (ret)
                dev_err(&isp->pdev->dev, "Start tsc sync timeout\n");

        return ret;
}

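/*
 * Retry the TSC sync request a few times, toggling the TSW soft reset bit
 * between timed-out attempts.
 */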
int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp)
{
        unsigned int i;

        for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
                u32 val;
                int ret;

                ret = ipu6_buttress_send_tsc_request(isp);
                if (ret != -ETIMEDOUT)
                        return ret;

                val = readl(isp->base + BUTTRESS_REG_TSW_CTL);
                val = val | BUTTRESS_TSW_CTL_SOFT_RESET;
                writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
                val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET;
                writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
        }

        dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n");

        return -ETIMEDOUT;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6);

void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val)
{
        u32 tsc_hi_1, tsc_hi_2, tsc_lo;
        unsigned long flags;

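        /*
         * Read high, low, high with interrupts disabled: if the high word
         * did not change, the 64-bit value is consistent; otherwise pick
         * the high word matching the low word based on the rollover bit.
         */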
        local_irq_save(flags);
        tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI);
        tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO);
        tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI);
        if (tsc_hi_1 == tsc_hi_2) {
                *val = (u64)tsc_hi_1 << 32 | tsc_lo;
        } else {
                /* Check if TSC has rolled over */
                if (tsc_lo & BIT(31))
                        *val = (u64)tsc_hi_1 << 32 | tsc_lo;
                else
                        *val = (u64)tsc_hi_2 << 32 | tsc_lo;
        }
        local_irq_restore(flags);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, INTEL_IPU6);

u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp)
{
        u64 ns = ticks * 10000;

        /*
         * Conversion of the TSC tick count to ns.
         * Example (TSC clock frequency is 19.2 MHz, so ref_clk is 192):
         * ns = ticks * 1000000000 / 19200000
         *    = ticks * 10000 / 192
         */
        return div_u64(ns, isp->buttress.ref_clk);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, INTEL_IPU6);

void ipu6_buttress_restore(struct ipu6_device *isp)
{
        struct ipu6_buttress *b = &isp->buttress;

        writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
        writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
        writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT);
}

int ipu6_buttress_init(struct ipu6_device *isp)
{
        int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
        struct ipu6_buttress *b = &isp->buttress;
        u32 val;

        mutex_init(&b->power_mutex);
        mutex_init(&b->auth_mutex);
        mutex_init(&b->cons_mutex);
        mutex_init(&b->ipc_mutex);
        init_completion(&b->ish.send_complete);
        init_completion(&b->cse.send_complete);
        init_completion(&b->ish.recv_complete);
        init_completion(&b->cse.recv_complete);

        b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
        b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
        b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
        b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
        b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
        b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
        b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
        b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;

        /* no ISH on IPU6 */
        memset(&b->ish, 0, sizeof(b->ish));
        INIT_LIST_HEAD(&b->constraints);

        isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
        dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
                 isp->secure_mode ? "secure" : "non-secure",
                 readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
                 readl(isp->base + BUTTRESS_REG_CAMERA_MASK));

        b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
        writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
        writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);

        /* get ref_clk frequency by reading the indication in btrs control */
        val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
        val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val);

        switch (val) {
        case 0x0:
                b->ref_clk = 240;
                break;
        case 0x1:
                b->ref_clk = 192;
                break;
        case 0x2:
                b->ref_clk = 384;
                break;
        default:
                dev_warn(&isp->pdev->dev,
                         "Unsupported ref clock, using 19.2 MHz by default.\n");
                b->ref_clk = 192;
                break;
        }

        /* Retry a couple of times in case CSE initialization is delayed */
        do {
                ret = ipu6_buttress_ipc_reset(isp, &b->cse);
                if (ret) {
                        dev_warn(&isp->pdev->dev,
                                 "IPC reset protocol failed, retrying\n");
                } else {
                        dev_dbg(&isp->pdev->dev, "IPC reset done\n");
                        return 0;
                }
        } while (ipc_reset_retry--);

        dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");

        mutex_destroy(&b->power_mutex);
        mutex_destroy(&b->auth_mutex);
        mutex_destroy(&b->cons_mutex);
        mutex_destroy(&b->ipc_mutex);

        return ret;
}

void ipu6_buttress_exit(struct ipu6_device *isp)
{
        struct ipu6_buttress *b = &isp->buttress;

        writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE);

        mutex_destroy(&b->power_mutex);
        mutex_destroy(&b->auth_mutex);
        mutex_destroy(&b->cons_mutex);
        mutex_destroy(&b->ipc_mutex);
}