// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * APM X-Gene SoC Hardware Monitoring Driver
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Author: Loc Ho <lho@apm.com>
 *         Hoan Tran <hotran@apm.com>
 *
 * This driver provides the following features:
 *  - Retrieve CPU total power (uW)
 *  - Retrieve IO total power (uW)
 *  - Retrieve SoC temperature (milli-degree C) and alarm
 */
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <acpi/pcc.h>

/* SLIMpro message defines */
#define MSG_TYPE_DBG			0
#define MSG_TYPE_ERR			7
#define MSG_TYPE_PWRMGMT		9

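/*
 * A SLIMpro message is a 32-bit word: bits [31:28] hold the message
 * type and bits [27:24] the subtype. For power-management messages,
 * bits [23:16] hold the TPC command; the low bits carry
 * command-specific payload such as the sensor register address.
 */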
#define MSG_TYPE(v)			(((v) & 0xF0000000) >> 28)
#define MSG_TYPE_SET(v)			(((v) << 28) & 0xF0000000)
#define MSG_SUBTYPE(v)			(((v) & 0x0F000000) >> 24)
#define MSG_SUBTYPE_SET(v)		(((v) << 24) & 0x0F000000)

#define DBG_SUBTYPE_SENSOR_READ		4
#define SENSOR_RD_MSG			0x04FFE902
#define SENSOR_RD_EN_ADDR(a)		((a) & 0x000FFFFF)
#define PMD_PWR_REG			0x20
#define PMD_PWR_MW_REG			0x26
#define SOC_PWR_REG			0x21
#define SOC_PWR_MW_REG			0x27
#define SOC_TEMP_REG			0x10

#define TEMP_NEGATIVE_BIT		8
#define SENSOR_INVALID_DATA		BIT(15)

#define PWRMGMT_SUBTYPE_TPC		1
#define TPC_ALARM			2
#define TPC_GET_ALARM			3
#define TPC_CMD(v)			(((v) & 0x00FF0000) >> 16)
#define TPC_CMD_SET(v)			(((v) << 16) & 0x00FF0000)
#define TPC_EN_MSG(hndl, cmd, type) \
	(MSG_TYPE_SET(MSG_TYPE_PWRMGMT) | \
	MSG_SUBTYPE_SET(hndl) | TPC_CMD_SET(cmd) | type)

/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands
 */
#define PCC_NUM_RETRIES			500

#define ASYNC_MSG_FIFO_SIZE		16
#define MBOX_OP_TIMEOUTMS		1000

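/*
 * hwmon reports power in microwatts and temperature in millidegree
 * Celsius, while the firmware reports whole watts (plus a milliwatt
 * remainder) and whole degrees Celsius, hence these converters.
 */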
#define WATT_TO_mWATT(x)		((x) * 1000)
#define mWATT_TO_uWATT(x)		((x) * 1000)
#define CELSIUS_TO_mCELSIUS(x)		((x) * 1000)

#define to_xgene_hwmon_dev(cl)		\
	container_of(cl, struct xgene_hwmon_dev, mbox_client)

enum xgene_hwmon_version {
	XGENE_HWMON_V1 = 0,
	XGENE_HWMON_V2 = 1,
};

struct slimpro_resp_msg {
	u32 msg;
	u32 param1;
	u32 param2;
} __packed;

struct xgene_hwmon_dev {
	struct device		*dev;
	struct mbox_chan	*mbox_chan;
	struct pcc_mbox_chan	*pcc_chan;
	struct mbox_client	mbox_client;
	int			mbox_idx;

	spinlock_t		kfifo_lock;
	struct mutex		rd_mutex;
	struct completion	rd_complete;
	int			resp_pending;
	struct slimpro_resp_msg sync_msg;

	struct work_struct	workq;
	struct kfifo_rec_ptr_1	async_msg_fifo;

	struct device		*hwmon_dev;
	bool			temp_critical_alarm;

	phys_addr_t		comm_base_addr;
	void			*pcc_comm_addr;
	u64			usecs_lat;
};

/*
 * Test and clear a bitmask in a 16-bit word, returning the previous
 * value of the masked bits.
 */
static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask)
{
	u16 ret, val;

	val = le16_to_cpu(READ_ONCE(*addr));
	ret = val & mask;
	val &= ~mask;
	WRITE_ONCE(*addr, cpu_to_le16(val));

	return ret;
}

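/*
 * Synchronous read over the PCC channel. The shared-memory region
 * starts with the generic PCC header (signature, command, status);
 * the three-word SLIMpro message is copied right after that header
 * before the doorbell is rung, and the reply is picked up from
 * ctx->sync_msg once the RX callback completes rd_complete.
 */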
static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
{
	struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
	u32 *ptr = (void *)(generic_comm_base + 1);
	int rc, i;
	u16 val;

	mutex_lock(&ctx->rd_mutex);
	init_completion(&ctx->rd_complete);
	ctx->resp_pending = true;

	/* Write signature for subspace */
	WRITE_ONCE(generic_comm_base->signature,
		   cpu_to_le32(PCC_SIGNATURE | ctx->mbox_idx));

	/* Write to the shared command region */
	WRITE_ONCE(generic_comm_base->command,
		   cpu_to_le16(MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INTR));

	/* Flip CMD COMPLETE bit */
	val = le16_to_cpu(READ_ONCE(generic_comm_base->status));
	val &= ~PCC_STATUS_CMD_COMPLETE;
	WRITE_ONCE(generic_comm_base->status, cpu_to_le16(val));

	/* Copy the message to the PCC comm space */
	for (i = 0; i < sizeof(struct slimpro_resp_msg) / 4; i++)
		WRITE_ONCE(ptr[i], cpu_to_le32(msg[i]));

	/* Ring the doorbell */
	rc = mbox_send_message(ctx->mbox_chan, msg);
	if (rc < 0) {
		dev_err(ctx->dev, "Mailbox send error %d\n", rc);
		goto err;
	}
	if (!wait_for_completion_timeout(&ctx->rd_complete,
					 usecs_to_jiffies(ctx->usecs_lat))) {
		dev_err(ctx->dev, "Mailbox operation timed out\n");
		rc = -ETIMEDOUT;
		goto err;
	}

	/* Check for error message */
	if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
		rc = -EINVAL;
		goto err;
	}

	msg[0] = ctx->sync_msg.msg;
	msg[1] = ctx->sync_msg.param1;
	msg[2] = ctx->sync_msg.param2;

err:
	mbox_chan_txdone(ctx->mbox_chan, 0);
	ctx->resp_pending = false;
	mutex_unlock(&ctx->rd_mutex);
	return rc;
}

static int xgene_hwmon_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
{
	int rc;

	mutex_lock(&ctx->rd_mutex);
	init_completion(&ctx->rd_complete);
	ctx->resp_pending = true;

	rc = mbox_send_message(ctx->mbox_chan, msg);
	if (rc < 0) {
		dev_err(ctx->dev, "Mailbox send error %d\n", rc);
		goto err;
	}

	if (!wait_for_completion_timeout(&ctx->rd_complete,
					 msecs_to_jiffies(MBOX_OP_TIMEOUTMS))) {
		dev_err(ctx->dev, "Mailbox operation timed out\n");
		rc = -ETIMEDOUT;
		goto err;
	}

	/* Check for error message */
	if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
		rc = -EINVAL;
		goto err;
	}

	msg[0] = ctx->sync_msg.msg;
	msg[1] = ctx->sync_msg.param1;
	msg[2] = ctx->sync_msg.param2;

err:
	ctx->resp_pending = false;
	mutex_unlock(&ctx->rd_mutex);
	return rc;
}

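/*
 * Read one sensor register over the DT (SLIMpro mailbox) or ACPI (PCC)
 * path. The register address goes in msg[1]; the value comes back in
 * msg[1], with bit 15 flagging an invalid reading.
 */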
static int xgene_hwmon_reg_map_rd(struct xgene_hwmon_dev *ctx, u32 addr,
				  u32 *data)
{
	u32 msg[3];
	int rc;

	msg[0] = SENSOR_RD_MSG;
	msg[1] = SENSOR_RD_EN_ADDR(addr);
	msg[2] = 0;

	if (acpi_disabled)
		rc = xgene_hwmon_rd(ctx, msg);
	else
		rc = xgene_hwmon_pcc_rd(ctx, msg);

	if (rc < 0)
		return rc;

	/*
	 * Check if sensor data is valid.
	 */
	if (msg[1] & SENSOR_INVALID_DATA)
		return -ENODATA;

	*data = msg[1];

	return rc;
}

static int xgene_hwmon_get_notification_msg(struct xgene_hwmon_dev *ctx,
					    u32 *amsg)
{
	u32 msg[3];
	int rc;

	msg[0] = TPC_EN_MSG(PWRMGMT_SUBTYPE_TPC, TPC_GET_ALARM, 0);
	msg[1] = 0;
	msg[2] = 0;

	rc = xgene_hwmon_pcc_rd(ctx, msg);
	if (rc < 0)
		return rc;

	amsg[0] = msg[0];
	amsg[1] = msg[1];
	amsg[2] = msg[2];

	return rc;
}

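/*
 * Total power is reported as whole watts in one register and the
 * milliwatt remainder in a second one; combine them into milliwatts.
 */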
static int xgene_hwmon_get_cpu_pwr(struct xgene_hwmon_dev *ctx, u32 *val)
{
	u32 watt, mwatt;
	int rc;

	rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_REG, &watt);
	if (rc < 0)
		return rc;

	rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_MW_REG, &mwatt);
	if (rc < 0)
		return rc;

	*val = WATT_TO_mWATT(watt) + mwatt;
	return 0;
}

static int xgene_hwmon_get_io_pwr(struct xgene_hwmon_dev *ctx, u32 *val)
{
	u32 watt, mwatt;
	int rc;

	rc = xgene_hwmon_reg_map_rd(ctx, SOC_PWR_REG, &watt);
	if (rc < 0)
		return rc;

	rc = xgene_hwmon_reg_map_rd(ctx, SOC_PWR_MW_REG, &mwatt);
	if (rc < 0)
		return rc;

	*val = WATT_TO_mWATT(watt) + mwatt;
	return 0;
}

static int xgene_hwmon_get_temp(struct xgene_hwmon_dev *ctx, u32 *val)
{
	return xgene_hwmon_reg_map_rd(ctx, SOC_TEMP_REG, val);
}

/*
 * Sensor temperature/power functions
 */
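/*
 * The attributes below appear as standard hwmon sysfs files, for
 * example /sys/class/hwmon/hwmonN/temp1_input (milli-degree C) and
 * power1_input (uW), so generic tools such as lm-sensors can read
 * them without driver-specific knowledge.
 */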
static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
	int rc, temp;
	u32 val;

	rc = xgene_hwmon_get_temp(ctx, &val);
	if (rc < 0)
		return rc;

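	/* Temperature is a 9-bit two's-complement value; bit 8 is the sign */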
	temp = sign_extend32(val, TEMP_NEGATIVE_BIT);

	return sysfs_emit(buf, "%d\n", CELSIUS_TO_mCELSIUS(temp));
}

static ssize_t temp1_label_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sysfs_emit(buf, "SoC Temperature\n");
}

static ssize_t temp1_critical_alarm_show(struct device *dev,
					 struct device_attribute *devattr,
					 char *buf)
{
	struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ctx->temp_critical_alarm);
}

static ssize_t power1_label_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "CPU power\n");
}

static ssize_t power2_label_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "IO power\n");
}

static ssize_t power1_input_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
	u32 val;
	int rc;

	rc = xgene_hwmon_get_cpu_pwr(ctx, &val);
	if (rc < 0)
		return rc;

	return sysfs_emit(buf, "%u\n", mWATT_TO_uWATT(val));
}

static ssize_t power2_input_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
	u32 val;
	int rc;

	rc = xgene_hwmon_get_io_pwr(ctx, &val);
	if (rc < 0)
		return rc;

	return sysfs_emit(buf, "%u\n", mWATT_TO_uWATT(val));
}

static DEVICE_ATTR_RO(temp1_label);
static DEVICE_ATTR_RO(temp1_input);
static DEVICE_ATTR_RO(temp1_critical_alarm);
static DEVICE_ATTR_RO(power1_label);
static DEVICE_ATTR_RO(power1_input);
static DEVICE_ATTR_RO(power2_label);
static DEVICE_ATTR_RO(power2_input);

static struct attribute *xgene_hwmon_attrs[] = {
	&dev_attr_temp1_label.attr,
	&dev_attr_temp1_input.attr,
	&dev_attr_temp1_critical_alarm.attr,
	&dev_attr_power1_label.attr,
	&dev_attr_power1_input.attr,
	&dev_attr_power2_label.attr,
	&dev_attr_power2_input.attr,
	NULL,
};

ATTRIBUTE_GROUPS(xgene_hwmon);

static int xgene_hwmon_tpc_alarm(struct xgene_hwmon_dev *ctx,
				 struct slimpro_resp_msg *amsg)
{
	ctx->temp_critical_alarm = !!amsg->param2;
	sysfs_notify(&ctx->dev->kobj, NULL, "temp1_critical_alarm");

	return 0;
}

static void xgene_hwmon_process_pwrmsg(struct xgene_hwmon_dev *ctx,
				       struct slimpro_resp_msg *amsg)
{
	if ((MSG_SUBTYPE(amsg->msg) == PWRMGMT_SUBTYPE_TPC) &&
	    (TPC_CMD(amsg->msg) == TPC_ALARM))
		xgene_hwmon_tpc_alarm(ctx, amsg);
}

/*
 * Work function that drains the async message FIFO filled by the RX
 * callbacks.
 */
static void xgene_hwmon_evt_work(struct work_struct *work)
{
	struct slimpro_resp_msg amsg;
	struct xgene_hwmon_dev *ctx;
	int ret;

	ctx = container_of(work, struct xgene_hwmon_dev, workq);
	while (kfifo_out_spinlocked(&ctx->async_msg_fifo, &amsg,
				    sizeof(struct slimpro_resp_msg),
				    &ctx->kfifo_lock)) {
		/*
		 * If PCC, send a consumer command to Platform to get info
		 * If Slimpro Mailbox, get message from specific FIFO
		 */
		if (!acpi_disabled) {
			ret = xgene_hwmon_get_notification_msg(ctx,
							       (u32 *)&amsg);
			if (ret < 0)
				continue;
		}

		if (MSG_TYPE(amsg.msg) == MSG_TYPE_PWRMGMT)
			xgene_hwmon_process_pwrmsg(ctx, &amsg);
	}
}

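/*
 * If the hwmon device is not yet registered and no synchronous read is
 * pending, queue the message and return -ENODEV so the caller defers
 * further handling until the driver is ready.
 */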
static int xgene_hwmon_rx_ready(struct xgene_hwmon_dev *ctx, void *msg)
{
	if (IS_ERR_OR_NULL(ctx->hwmon_dev) && !ctx->resp_pending) {
		/* Enqueue to the FIFO */
		kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
				    sizeof(struct slimpro_resp_msg),
				    &ctx->kfifo_lock);
		return -ENODEV;
	}

	return 0;
}

/*
 * This function is called when the SLIMpro Mailbox receives a message
 */
static void xgene_hwmon_rx_cb(struct mbox_client *cl, void *msg)
{
	struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);

	/*
	 * While the driver registers with the mailbox framework, an interrupt
	 * can be pending before the probe function completes its
	 * initialization. If such a condition occurs, just queue up the
	 * message, as the driver is not yet ready to service the callback.
	 */
	if (xgene_hwmon_rx_ready(ctx, msg) < 0)
		return;

	/*
	 * Response message format:
	 * msg[0] is the return code of the operation
	 * msg[1] is the first parameter word
	 * msg[2] is the second parameter word
	 *
	 * As the message only carries dword-sized fields, just assign them.
	 */

	/* Check for sync query */
	if (ctx->resp_pending &&
	    ((MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_ERR) ||
	     (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_DBG &&
	      MSG_SUBTYPE(((u32 *)msg)[0]) == DBG_SUBTYPE_SENSOR_READ) ||
	     (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_PWRMGMT &&
	      MSG_SUBTYPE(((u32 *)msg)[0]) == PWRMGMT_SUBTYPE_TPC &&
	      TPC_CMD(((u32 *)msg)[0]) == TPC_ALARM))) {
		ctx->sync_msg.msg = ((u32 *)msg)[0];
		ctx->sync_msg.param1 = ((u32 *)msg)[1];
		ctx->sync_msg.param2 = ((u32 *)msg)[2];

		/* Operation waiting for response */
		complete(&ctx->rd_complete);

		return;
	}

	/* Enqueue to the FIFO */
	kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
			    sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
	/* Schedule the bottom handler */
	schedule_work(&ctx->workq);
}

/*
 * This function is called when the PCC Mailbox receives a message
 */
static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
	struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
	struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
	struct slimpro_resp_msg amsg;

	/*
	 * While the driver registers with the mailbox framework, an interrupt
	 * can be pending before the probe function completes its
	 * initialization. If such a condition occurs, just queue up the
	 * message, as the driver is not yet ready to service the callback.
	 */
	if (xgene_hwmon_rx_ready(ctx, &amsg) < 0)
		return;

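	/* The response payload follows the PCC shared-memory header */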
	msg = generic_comm_base + 1;
	/* Check if platform sends interrupt */
	if (!xgene_word_tst_and_clr(&generic_comm_base->status,
				    PCC_STATUS_SCI_DOORBELL))
		return;

	/*
	 * Response message format:
	 * msg[0] is the return code of the operation
	 * msg[1] is the first parameter word
	 * msg[2] is the second parameter word
	 *
	 * As the message only carries dword-sized fields, just assign them.
	 */

	/* Check for sync query */
	if (ctx->resp_pending &&
	    ((MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_ERR) ||
	     (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_DBG &&
	      MSG_SUBTYPE(((u32 *)msg)[0]) == DBG_SUBTYPE_SENSOR_READ) ||
	     (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_PWRMGMT &&
	      MSG_SUBTYPE(((u32 *)msg)[0]) == PWRMGMT_SUBTYPE_TPC &&
	      TPC_CMD(((u32 *)msg)[0]) == TPC_ALARM))) {
		/* Check if platform completes command */
		if (xgene_word_tst_and_clr(&generic_comm_base->status,
					   PCC_STATUS_CMD_COMPLETE)) {
			ctx->sync_msg.msg = ((u32 *)msg)[0];
			ctx->sync_msg.param1 = ((u32 *)msg)[1];
			ctx->sync_msg.param2 = ((u32 *)msg)[2];

			/* Operation waiting for response */
			complete(&ctx->rd_complete);

			return;
		}
	}

	/*
	 * The platform notified OSPM with an interrupt; OSPM schedules a
	 * consumer command in a workqueue to retrieve the information.
	 * The platform must wait until OSPM has issued the consumer
	 * command that serves this notification.
	 */

	/* Enqueue to the FIFO */
	kfifo_in_spinlocked(&ctx->async_msg_fifo, &amsg,
			    sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
	/* Schedule the bottom handler */
	schedule_work(&ctx->workq);
}

static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret) {
		dev_dbg(cl->dev, "TX did not complete: CMD sent:%x, ret:%d\n",
			*(u16 *)msg, ret);
	} else {
		dev_dbg(cl->dev, "TX completed. CMD sent:%x, ret:%d\n",
			*(u16 *)msg, ret);
	}
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
	{"APMC0D29", XGENE_HWMON_V1},
	{"APMC0D8A", XGENE_HWMON_V2},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
#endif

static int xgene_hwmon_probe(struct platform_device *pdev)
{
	struct xgene_hwmon_dev *ctx;
	struct mbox_client *cl;
	int rc;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctx);
	cl = &ctx->mbox_client;

	spin_lock_init(&ctx->kfifo_lock);
	mutex_init(&ctx->rd_mutex);

	rc = kfifo_alloc(&ctx->async_msg_fifo,
			 sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
			 GFP_KERNEL);
	if (rc)
		return -ENOMEM;

	INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);

	/* Request mailbox channel */
	cl->dev = &pdev->dev;
	cl->tx_done = xgene_hwmon_tx_done;
	cl->tx_block = false;
	cl->tx_tout = MBOX_OP_TIMEOUTMS;
	cl->knows_txdone = false;
	if (acpi_disabled) {
		cl->rx_callback = xgene_hwmon_rx_cb;
		ctx->mbox_chan = mbox_request_channel(cl, 0);
		if (IS_ERR(ctx->mbox_chan)) {
			dev_err(&pdev->dev,
				"SLIMpro mailbox channel request failed\n");
			rc = -ENODEV;
			goto out_mbox_free;
		}
	} else {
		struct pcc_mbox_chan *pcc_chan;
		const struct acpi_device_id *acpi_id;
		int version;

		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id) {
			rc = -EINVAL;
			goto out_mbox_free;
		}

		version = (int)acpi_id->driver_data;

		if (device_property_read_u32(&pdev->dev, "pcc-channel",
					     &ctx->mbox_idx)) {
			dev_err(&pdev->dev, "no pcc-channel property\n");
			rc = -ENODEV;
			goto out_mbox_free;
		}

		cl->rx_callback = xgene_hwmon_pcc_rx_cb;
		pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
		if (IS_ERR(pcc_chan)) {
			dev_err(&pdev->dev,
				"PCC channel request failed\n");
			rc = -ENODEV;
			goto out_mbox_free;
		}

		ctx->pcc_chan = pcc_chan;
		ctx->mbox_chan = pcc_chan->mchan;

		if (!ctx->mbox_chan->mbox->txdone_irq) {
			dev_err(&pdev->dev, "PCC IRQ not supported\n");
			rc = -ENODEV;
			goto out;
		}

		/*
		 * This is the shared communication region
		 * for the OS and Platform to communicate over.
		 */
		ctx->comm_base_addr = pcc_chan->shmem_base_addr;
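		/*
		 * V2 platforms map the PCC shared memory non-cacheable via
		 * ioremap(); V1 platforms use a cacheable memremap().
		 */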
		if (ctx->comm_base_addr) {
			if (version == XGENE_HWMON_V2)
				ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
								  ctx->comm_base_addr,
								  pcc_chan->shmem_size);
			else
				ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
								   ctx->comm_base_addr,
								   pcc_chan->shmem_size,
								   MEMREMAP_WB);
		} else {
			dev_err(&pdev->dev, "Failed to get PCC comm region\n");
			rc = -ENODEV;
			goto out;
		}

		if (!ctx->pcc_comm_addr) {
			dev_err(&pdev->dev,
				"Failed to ioremap PCC comm region\n");
			rc = -ENOMEM;
			goto out;
		}

		/*
		 * pcc_chan->latency is only a nominal value. In reality the
		 * remote processor can be much slower to reply, so add an
		 * arbitrary amount of wait on top of the nominal latency.
		 */
		ctx->usecs_lat = PCC_NUM_RETRIES * pcc_chan->latency;
	}

	ctx->hwmon_dev = hwmon_device_register_with_groups(ctx->dev,
							   "apm_xgene",
							   ctx,
							   xgene_hwmon_groups);
	if (IS_ERR(ctx->hwmon_dev)) {
		dev_err(&pdev->dev, "Failed to register HW monitor device\n");
		rc = PTR_ERR(ctx->hwmon_dev);
		goto out;
	}

	/*
	 * Schedule the bottom handler if there is a pending message.
	 */
	schedule_work(&ctx->workq);

	dev_info(&pdev->dev, "APM X-Gene SoC HW monitor driver registered\n");

	return 0;

out:
	if (acpi_disabled)
		mbox_free_channel(ctx->mbox_chan);
	else
		pcc_mbox_free_channel(ctx->pcc_chan);
out_mbox_free:
	kfifo_free(&ctx->async_msg_fifo);

	return rc;
}

static void xgene_hwmon_remove(struct platform_device *pdev)
{
	struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);

	cancel_work_sync(&ctx->workq);
	hwmon_device_unregister(ctx->hwmon_dev);
	kfifo_free(&ctx->async_msg_fifo);
	if (acpi_disabled)
		mbox_free_channel(ctx->mbox_chan);
	else
		pcc_mbox_free_channel(ctx->pcc_chan);
}

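/*
 * Illustrative devicetree node for the SLIMpro path (the mailbox
 * phandle and channel index depend on the platform):
 *
 *	hwmonslimpro {
 *		compatible = "apm,xgene-slimpro-hwmon";
 *		mboxes = <&mailbox 7>;
 *	};
 */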
static const struct of_device_id xgene_hwmon_of_match[] = {
	{.compatible = "apm,xgene-slimpro-hwmon"},
	{}
};
MODULE_DEVICE_TABLE(of, xgene_hwmon_of_match);

static struct platform_driver xgene_hwmon_driver = {
	.probe = xgene_hwmon_probe,
	.remove_new = xgene_hwmon_remove,
	.driver = {
		.name = "xgene-slimpro-hwmon",
		.of_match_table = xgene_hwmon_of_match,
		.acpi_match_table = ACPI_PTR(xgene_hwmon_acpi_match),
	},
};
module_platform_driver(xgene_hwmon_driver);

MODULE_DESCRIPTION("APM X-Gene SoC hardware monitor");
MODULE_LICENSE("GPL");