// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2023 - 2024 Intel Corporation */

#include "main.h"
#include <linux/net/intel/iidc_rdma_idpf.h>
#include "ig3rdma_hw.h"

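/**
 * ig3rdma_idc_core_event_handler - Handle events from the IDC core device
 * @cdev_info: IDC core device info from the idpf driver
 * @event: bitmap of events that have occurred
 *
 * On a WARN_RESET event, flag the function for reset and mark the
 * virtual channel as down so no further requests are sent on it.
 */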
static void ig3rdma_idc_core_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
					   struct iidc_rdma_event *event)
{
	struct irdma_pci_f *rf = auxiliary_get_drvdata(cdev_info->adev);

	if (*event->type & BIT(IIDC_RDMA_EVENT_WARN_RESET)) {
		rf->reset = true;
		rf->sc_dev.vchnl_up = false;
	}
}

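/**
 * ig3rdma_vchnl_send_sync - Send a virtual channel request and wait for the response
 * @dev: irdma device
 * @msg: request message to send
 * @len: length of the request message
 * @recv_msg: buffer that receives the response message
 * @recv_len: length of the response message
 *
 * A completion timeout from the idpf transport marks the virtual
 * channel as down.
 *
 * Return: 0 on success, negative error code on failure.
 */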
int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
			    u8 *recv_msg, u16 *recv_len)
{
	struct iidc_rdma_core_dev_info *cdev_info = dev_to_rf(dev)->cdev;
	int ret;

	ret = idpf_idc_rdma_vc_send_sync(cdev_info, msg, len, recv_msg,
					 recv_len);
	if (ret == -ETIMEDOUT) {
		ibdev_err(&(dev_to_rf(dev)->iwdev->ibdev),
			  "Virtual channel Req <-> Resp completion timeout\n");
		dev->vchnl_up = false;
	}

	return ret;
}

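/**
 * ig3rdma_vchnl_init - Bring up the virtual channel
 * @rf: RDMA PCI function
 * @cdev_info: IDC core device info from the idpf driver
 * @rdma_ver: on return, hardware revision reported by the channel
 *
 * Allocate the ordered virtual channel workqueue, initialize the channel
 * mutex, and hand both to irdma_sc_vchnl_init(). On failure the workqueue
 * and mutex are torn down again.
 *
 * Return: 0 on success, negative error code on failure.
 */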
static int ig3rdma_vchnl_init(struct irdma_pci_f *rf,
			      struct iidc_rdma_core_dev_info *cdev_info,
			      u8 *rdma_ver)
{
	struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
	struct irdma_vchnl_init_info virt_info;
	u8 gen = rf->rdma_ver;
	int ret;

	rf->vchnl_wq = alloc_ordered_workqueue("irdma-virtchnl-wq", 0);
	if (!rf->vchnl_wq)
		return -ENOMEM;

	mutex_init(&rf->sc_dev.vchnl_mutex);

	virt_info.is_pf = !idc_priv->ftype;
	virt_info.hw_rev = gen;
	virt_info.privileged = gen == IRDMA_GEN_2;
	virt_info.vchnl_wq = rf->vchnl_wq;
	ret = irdma_sc_vchnl_init(&rf->sc_dev, &virt_info);
	if (ret) {
		destroy_workqueue(rf->vchnl_wq);
		mutex_destroy(&rf->sc_dev.vchnl_mutex);
		return ret;
	}

	*rdma_ver = rf->sc_dev.hw_attrs.uk_attrs.hw_rev;

	return 0;
}

/**
 * ig3rdma_request_reset - Request a reset
 * @rf: RDMA PCI function
 */
static void ig3rdma_request_reset(struct irdma_pci_f *rf)
{
	ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
	idpf_idc_request_reset(rf->cdev, IIDC_FUNC_RESET);
}

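/**
 * ig3rdma_cfg_regions - Map the device memory regions
 * @hw: irdma hardware structure
 * @cdev_info: IDC core device info from the idpf driver
 *
 * Map the function's RDMA register window from BAR 0, using the PF or VF
 * offset and length as appropriate, and record the memory regions already
 * mapped by the idpf driver.
 *
 * Return: 0 on success, negative error code on failure.
 */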
static int ig3rdma_cfg_regions(struct irdma_hw *hw,
			       struct iidc_rdma_core_dev_info *cdev_info)
{
	struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
	struct pci_dev *pdev = cdev_info->pdev;
	int i;

	switch (idc_priv->ftype) {
	case IIDC_FUNCTION_TYPE_PF:
		hw->rdma_reg.len = IG3_PF_RDMA_REGION_LEN;
		hw->rdma_reg.offset = IG3_PF_RDMA_REGION_OFFSET;
		break;
	case IIDC_FUNCTION_TYPE_VF:
		hw->rdma_reg.len = IG3_VF_RDMA_REGION_LEN;
		hw->rdma_reg.offset = IG3_VF_RDMA_REGION_OFFSET;
		break;
	default:
		return -ENODEV;
	}

	hw->rdma_reg.addr = ioremap(pci_resource_start(pdev, 0) + hw->rdma_reg.offset,
				    hw->rdma_reg.len);
	if (!hw->rdma_reg.addr)
		return -ENOMEM;

	hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions);
	hw->io_regs = kcalloc(hw->num_io_regions,
			      sizeof(struct irdma_mmio_region), GFP_KERNEL);
	if (!hw->io_regs) {
		iounmap(hw->rdma_reg.addr);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_io_regions; i++) {
		hw->io_regs[i].addr =
			idc_priv->mapped_mem_regions[i].region_addr;
		hw->io_regs[i].len =
			le64_to_cpu(idc_priv->mapped_mem_regions[i].size);
		hw->io_regs[i].offset =
			le64_to_cpu(idc_priv->mapped_mem_regions[i].start_offset);
	}

	return 0;
}

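/**
 * ig3rdma_decfg_rf - Undo ig3rdma_cfg_rf
 * @rf: RDMA PCI function
 */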
static void ig3rdma_decfg_rf(struct irdma_pci_f *rf)
{
	struct irdma_hw *hw = &rf->hw;

	mutex_destroy(&rf->ah_tbl_lock);
	destroy_workqueue(rf->vchnl_wq);
	mutex_destroy(&rf->sc_dev.vchnl_mutex);
	kfree(hw->io_regs);
	iounmap(hw->rdma_reg.addr);
}

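/**
 * ig3rdma_cfg_rf - Configure the RDMA PCI function
 * @rf: RDMA PCI function
 * @cdev_info: IDC core device info from the idpf driver
 *
 * Wire @rf to the core device, bring up the virtual channel, map the
 * device memory regions, and apply the default resource profile.
 *
 * Return: 0 on success, negative error code on failure.
 */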
static int ig3rdma_cfg_rf(struct irdma_pci_f *rf,
			  struct iidc_rdma_core_dev_info *cdev_info)
{
	struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
	int err;

	rf->sc_dev.hw = &rf->hw;
	rf->cdev = cdev_info;
	rf->pcidev = cdev_info->pdev;
	rf->hw.device = &rf->pcidev->dev;
	rf->msix_count = idc_priv->msix_count;
	rf->msix_entries = idc_priv->msix_entries;

	err = ig3rdma_vchnl_init(rf, cdev_info, &rf->rdma_ver);
	if (err)
		return err;

	err = ig3rdma_cfg_regions(&rf->hw, cdev_info);
	if (err) {
		destroy_workqueue(rf->vchnl_wq);
		mutex_destroy(&rf->sc_dev.vchnl_mutex);
		return err;
	}

	rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
	rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
	rf->gen_ops.request_reset = ig3rdma_request_reset;
	rf->limits_sel = 7;
	mutex_init(&rf->ah_tbl_lock);

	return 0;
}

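/**
 * ig3rdma_core_probe - Probe the RDMA core auxiliary device
 * @aux_dev: auxiliary device created by the idpf driver
 * @id: matching entry from ig3rdma_core_auxiliary_id_table
 *
 * Allocate and configure the RDMA PCI function, initialize the control
 * hardware, and request creation of the vport auxiliary devices.
 *
 * Return: 0 on success, negative error code on failure.
 */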
static int ig3rdma_core_probe(struct auxiliary_device *aux_dev,
			      const struct auxiliary_device_id *id)
{
	struct iidc_rdma_core_auxiliary_dev *idc_adev =
		container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
	struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
	struct irdma_pci_f *rf;
	int err;

	rf = kzalloc(sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	err = ig3rdma_cfg_rf(rf, cdev_info);
	if (err)
		goto err_cfg_rf;

	err = irdma_ctrl_init_hw(rf);
	if (err)
		goto err_ctrl_init;

	auxiliary_set_drvdata(aux_dev, rf);

	err = idpf_idc_vport_dev_ctrl(cdev_info, true);
	if (err)
		goto err_vport_ctrl;

	return 0;

err_vport_ctrl:
	irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
	ig3rdma_decfg_rf(rf);
err_cfg_rf:
	kfree(rf);

	return err;
}

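/**
 * ig3rdma_core_remove - Remove the RDMA core auxiliary device
 * @aux_dev: auxiliary device being removed
 *
 * Unwind ig3rdma_core_probe() in reverse: take down the vport devices,
 * deinitialize the control hardware, and free the function.
 */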
static void ig3rdma_core_remove(struct auxiliary_device *aux_dev)
{
	struct iidc_rdma_core_auxiliary_dev *idc_adev =
		container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
	struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
	struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_dev);

	idpf_idc_vport_dev_ctrl(cdev_info, false);
	irdma_ctrl_deinit_hw(rf);
	ig3rdma_decfg_rf(rf);
	kfree(rf);
}

static const struct auxiliary_device_id ig3rdma_core_auxiliary_id_table[] = {
	{.name = "idpf.8086.rdma.core", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, ig3rdma_core_auxiliary_id_table);

struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv = {
	.adrv = {
		.name = "core",
		.id_table = ig3rdma_core_auxiliary_id_table,
		.probe = ig3rdma_core_probe,
		.remove = ig3rdma_core_remove,
	},
	.event_handler = ig3rdma_idc_core_event_handler,
};
237