xref: /linux/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2022 Intel Corporation */
3 #include <linux/bitfield.h>
4 #include <linux/iopoll.h>
5 #include <linux/kernel.h>
6 
7 #include "adf_accel_devices.h"
8 #include "adf_admin.h"
9 #include "adf_common_drv.h"
10 #include "adf_gen4_pm.h"
11 #include "adf_cfg_strings.h"
12 #include "icp_qat_fw_init_admin.h"
13 #include "adf_gen4_hw_data.h"
14 #include "adf_cfg.h"
15 
/*
 * Context handed off from the top-half interrupt handler
 * (adf_gen4_handle_pm_interrupt()) to the deferred bottom half
 * (pm_bh_handler()) via the misc workqueue. Allocated with GFP_ATOMIC in
 * the top half and freed by the bottom half once processed.
 */
struct adf_gen4_pm_data {
	struct work_struct pm_irq_work;	/* work item queued on the misc wq */
	struct adf_accel_dev *accel_dev;	/* device the interrupt fired on */
	u32 pm_int_sts;			/* PM_INTERRUPT status latched in the top half */
};
21 
send_host_msg(struct adf_accel_dev * accel_dev)22 static int send_host_msg(struct adf_accel_dev *accel_dev)
23 {
24 	char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
25 	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
26 	struct adf_pm *pm = &accel_dev->power_management;
27 	bool pm_idle_support;
28 	u32 msg;
29 	int ret;
30 
31 	msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
32 	if (msg & ADF_GEN4_PM_MSG_PENDING)
33 		return -EBUSY;
34 
35 	adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
36 				ADF_PM_IDLE_SUPPORT, pm_idle_support_cfg);
37 	ret = kstrtobool(pm_idle_support_cfg, &pm_idle_support);
38 	if (ret)
39 		pm_idle_support = true;
40 
41 	if (pm_idle_support)
42 		pm->host_ack_counter++;
43 	else
44 		pm->host_nack_counter++;
45 
46 	/* Send HOST_MSG */
47 	msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK,
48 			 pm_idle_support ? PM_SET_MIN : PM_NO_CHANGE);
49 	msg |= ADF_GEN4_PM_MSG_PENDING;
50 	ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
51 
52 	/* Poll status register to make sure the HOST_MSG has been processed */
53 	return read_poll_timeout(ADF_CSR_RD, msg,
54 				!(msg & ADF_GEN4_PM_MSG_PENDING),
55 				ADF_GEN4_PM_MSG_POLL_DELAY_US,
56 				ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
57 				ADF_GEN4_PM_HOST_MSG);
58 }
59 
pm_bh_handler(struct work_struct * work)60 static void pm_bh_handler(struct work_struct *work)
61 {
62 	struct adf_gen4_pm_data *pm_data =
63 		container_of(work, struct adf_gen4_pm_data, pm_irq_work);
64 	struct adf_accel_dev *accel_dev = pm_data->accel_dev;
65 	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
66 	struct adf_pm *pm = &accel_dev->power_management;
67 	u32 pm_int_sts = pm_data->pm_int_sts;
68 	u32 val;
69 
70 	/* PM Idle interrupt */
71 	if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
72 		pm->idle_irq_counters++;
73 		/* Issue host message to FW */
74 		if (send_host_msg(accel_dev))
75 			dev_warn_ratelimited(&GET_DEV(accel_dev),
76 					     "Failed to send host msg to FW\n");
77 	}
78 
79 	/* PM throttle interrupt */
80 	if (pm_int_sts & ADF_GEN4_PM_THR_STS)
81 		pm->throttle_irq_counters++;
82 
83 	/* PM fw interrupt */
84 	if (pm_int_sts & ADF_GEN4_PM_FW_INT_STS)
85 		pm->fw_irq_counters++;
86 
87 	/* Clear interrupt status */
88 	ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
89 
90 	/* Reenable PM interrupt */
91 	val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
92 	val &= ~ADF_GEN4_PM_SOU;
93 	ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
94 
95 	kfree(pm_data);
96 }
97 
adf_gen4_handle_pm_interrupt(struct adf_accel_dev * accel_dev)98 bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
99 {
100 	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
101 	struct adf_gen4_pm_data *pm_data = NULL;
102 	u32 errsou2;
103 	u32 errmsk2;
104 	u32 val;
105 
106 	/* Only handle the interrupt triggered by PM */
107 	errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
108 	if (errmsk2 & ADF_GEN4_PM_SOU)
109 		return false;
110 
111 	errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
112 	if (!(errsou2 & ADF_GEN4_PM_SOU))
113 		return false;
114 
115 	/* Disable interrupt */
116 	val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
117 	val |= ADF_GEN4_PM_SOU;
118 	ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
119 
120 	val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
121 
122 	pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
123 	if (!pm_data)
124 		return false;
125 
126 	pm_data->pm_int_sts = val;
127 	pm_data->accel_dev = accel_dev;
128 
129 	INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
130 	adf_misc_wq_queue_work(&pm_data->pm_irq_work);
131 
132 	return true;
133 }
134 EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
135 
adf_gen4_enable_pm(struct adf_accel_dev * accel_dev)136 int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
137 {
138 	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
139 	int ret;
140 	u32 val;
141 
142 	ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
143 	if (ret)
144 		return ret;
145 
146 	/* Initialize PM internal data */
147 	adf_gen4_init_dev_pm_data(accel_dev);
148 
149 	/* Enable default PM interrupts: IDLE, THROTTLE */
150 	val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
151 	val |= ADF_GEN4_PM_INT_EN_DEFAULT;
152 
153 	/* Clear interrupt status */
154 	val |= ADF_GEN4_PM_INT_STS_MASK;
155 	ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
156 
157 	/* Unmask PM Interrupt */
158 	val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
159 	val &= ~ADF_GEN4_PM_SOU;
160 	ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
161 
162 	return 0;
163 }
164 EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
165