// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/delay.h>
#include <linux/pci.h>
#include "adf_accel_devices.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_pf_msg.h"
#include "adf_pfvf_pf_proto.h"

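/*
 * Timing of the restarting handshake below: the PF polls the VFs'
 * restarting flags every ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY ms for up
 * to ADF_VF_SHUTDOWN_RETRY iterations, i.e. roughly 10 seconds in total
 * before it gives up and only warns.
 */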
#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY	100
#define ADF_VF_SHUTDOWN_RETRY			100

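/*
 * adf_pf2vf_notify_restarting() - notify every initialized VF that the PF
 * is about to restart.
 *
 * VFs that speak at least the fallback protocol version
 * (ADF_PFVF_COMPAT_FALLBACK) have their restarting flag set before the
 * RESTARTING message is sent; the flag is expected to be cleared elsewhere
 * in the PF/VF protocol once the VF reports that it has stopped. Older VFs
 * are only notified. Send failures are logged and not propagated.
 */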
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_vf_info *vf;
	struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

	dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
		if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
			vf->restarting = true;
		else
			vf->restarting = false;

		if (!vf->init)
			continue;

		if (adf_send_pf2vf_msg(accel_dev, i, msg))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send restarting msg to VF%d\n", i);
	}
}

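/*
 * adf_pf2vf_wait_for_restarting_complete() - poll until no VF has its
 * restarting flag set, or until the retry budget is exhausted.
 *
 * The flag is set by adf_pf2vf_notify_restarting(); if any VF is still
 * marked as restarting once the retries run out, only a warning is emitted.
 */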
void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
{
	int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
	int i, retries = ADF_VF_SHUTDOWN_RETRY;
	struct adf_accel_vf_info *vf;
	bool vf_running;

	dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n");
	do {
		vf_running = false;
		for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++)
			if (vf->restarting)
				vf_running = true;
		if (!vf_running)
			break;
		msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY);
	} while (--retries);

	if (vf_running)
		dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n");
}

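/*
 * adf_pf2vf_notify_restarted() - notify every initialized, fallback-capable
 * VF that the PF has completed its restart. Send failures are logged but
 * not propagated.
 */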
void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
{
	struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED };
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;

	dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n");
	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
		if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
		    adf_send_pf2vf_msg(accel_dev, i, msg))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send restarted msg to VF%d\n", i);
	}
}

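/*
 * adf_pf2vf_notify_fatal_error() - report a PF fatal error to every
 * initialized, fallback-capable VF. As above, send failures are only
 * logged.
 */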
void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
{
	struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR };
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;

	dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n");
	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
		if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
		    adf_send_pf2vf_msg(accel_dev, i, msg))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send fatal error msg to VF%d\n", i);
	}
}

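/*
 * adf_pf_capabilities_msg_provider() - fill @buffer with a capabilities_v2
 * block message (the extended DC capabilities and the acceleration
 * capabilities mask) for the PF/VF block message machinery. The @compat
 * argument is not used here.
 */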
int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
				     u8 *buffer, u8 compat)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct capabilities_v2 caps_msg;

	caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
	caps_msg.capabilities = hw_data->accel_capabilities_mask;

	caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
	caps_msg.hdr.payload_size =
			ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);

	memcpy(buffer, &caps_msg, sizeof(caps_msg));

	return 0;
}

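/*
 * adf_pf_ring_to_svc_msg_provider() - fill @buffer with the PF's ring to
 * service map as a ring_to_svc_map_v1 block message. The @compat argument
 * is not used here.
 */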
int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
				    u8 *buffer, u8 compat)
{
	struct ring_to_svc_map_v1 rts_map_msg;

	rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
	rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
	rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);

	memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));

	return 0;
}