xref: /linux/drivers/infiniband/hw/irdma/ig3rdma_if.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2023 - 2024 Intel Corporation */
3 
4 #include "main.h"
5 #include <linux/net/intel/iidc_rdma_idpf.h>
6 #include "ig3rdma_hw.h"
7 
8 static void ig3rdma_idc_core_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
9 					   struct iidc_rdma_event *event)
10 {
11 	struct irdma_pci_f *rf = auxiliary_get_drvdata(cdev_info->adev);
12 
13 	if (*event->type & BIT(IIDC_RDMA_EVENT_WARN_RESET)) {
14 		rf->reset = true;
15 		rf->sc_dev.vchnl_up = false;
16 	}
17 }
18 
19 int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
20 			    u8 *recv_msg, u16 *recv_len)
21 {
22 	struct iidc_rdma_core_dev_info *cdev_info = dev_to_rf(dev)->cdev;
23 	int ret;
24 
25 	ret = idpf_idc_rdma_vc_send_sync(cdev_info, msg, len, recv_msg,
26 					 recv_len);
27 	if (ret == -ETIMEDOUT) {
28 		ibdev_err(&(dev_to_rf(dev)->iwdev->ibdev),
29 			  "Virtual channel Req <-> Resp completion timeout\n");
30 		dev->vchnl_up = false;
31 	}
32 
33 	return ret;
34 }
35 
36 static int ig3rdma_vchnl_init(struct irdma_pci_f *rf,
37 			      struct iidc_rdma_core_dev_info *cdev_info,
38 			      u8 *rdma_ver)
39 {
40 	struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
41 	struct irdma_vchnl_init_info virt_info;
42 	u8 gen = rf->rdma_ver;
43 	int ret;
44 
45 	rf->vchnl_wq = alloc_ordered_workqueue("irdma-virtchnl-wq", 0);
46 	if (!rf->vchnl_wq)
47 		return -ENOMEM;
48 
49 	mutex_init(&rf->sc_dev.vchnl_mutex);
50 
51 	virt_info.is_pf = !idc_priv->ftype;
52 	virt_info.hw_rev = gen;
53 	virt_info.privileged = gen == IRDMA_GEN_2;
54 	virt_info.vchnl_wq = rf->vchnl_wq;
55 	ret = irdma_sc_vchnl_init(&rf->sc_dev, &virt_info);
56 	if (ret) {
57 		destroy_workqueue(rf->vchnl_wq);
58 		return ret;
59 	}
60 
61 	*rdma_ver = rf->sc_dev.hw_attrs.uk_attrs.hw_rev;
62 
63 	return 0;
64 }
65 
/**
 * ig3rdma_request_reset - Request a reset
 * @rf: RDMA PCI function
 *
 * Asks the IDPF core driver to perform a function-level reset on our
 * behalf. Best effort: the return value of idpf_idc_request_reset() is
 * intentionally not checked here — NOTE(review): presumably the core
 * driver logs/handles its own failure; confirm against the IDPF side.
 */
static void ig3rdma_request_reset(struct irdma_pci_f *rf)
{
	ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
	idpf_idc_request_reset(rf->cdev, IIDC_FUNC_RESET);
}
75 
76 static int ig3rdma_cfg_regions(struct irdma_hw *hw,
77 			       struct iidc_rdma_core_dev_info *cdev_info)
78 {
79 	struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
80 	struct pci_dev *pdev = cdev_info->pdev;
81 	int i;
82 
83 	switch (idc_priv->ftype) {
84 	case IIDC_FUNCTION_TYPE_PF:
85 		hw->rdma_reg.len = IG3_PF_RDMA_REGION_LEN;
86 		hw->rdma_reg.offset = IG3_PF_RDMA_REGION_OFFSET;
87 		break;
88 	case IIDC_FUNCTION_TYPE_VF:
89 		hw->rdma_reg.len = IG3_VF_RDMA_REGION_LEN;
90 		hw->rdma_reg.offset = IG3_VF_RDMA_REGION_OFFSET;
91 		break;
92 	default:
93 		return -ENODEV;
94 	}
95 
96 	hw->rdma_reg.addr = ioremap(pci_resource_start(pdev, 0) + hw->rdma_reg.offset,
97 				    hw->rdma_reg.len);
98 
99 	if (!hw->rdma_reg.addr)
100 		return -ENOMEM;
101 
102 	hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions);
103 	hw->io_regs = kcalloc(hw->num_io_regions,
104 			      sizeof(struct irdma_mmio_region), GFP_KERNEL);
105 
106 	if (!hw->io_regs) {
107 		iounmap(hw->rdma_reg.addr);
108 		return -ENOMEM;
109 	}
110 
111 	for (i = 0; i < hw->num_io_regions; i++) {
112 		hw->io_regs[i].addr =
113 			idc_priv->mapped_mem_regions[i].region_addr;
114 		hw->io_regs[i].len =
115 			le64_to_cpu(idc_priv->mapped_mem_regions[i].size);
116 		hw->io_regs[i].offset =
117 			le64_to_cpu(idc_priv->mapped_mem_regions[i].start_offset);
118 	}
119 
120 	return 0;
121 }
122 
123 static void ig3rdma_decfg_rf(struct irdma_pci_f *rf)
124 {
125 	struct irdma_hw *hw = &rf->hw;
126 
127 	destroy_workqueue(rf->vchnl_wq);
128 	kfree(hw->io_regs);
129 	iounmap(hw->rdma_reg.addr);
130 }
131 
132 static int ig3rdma_cfg_rf(struct irdma_pci_f *rf,
133 			  struct iidc_rdma_core_dev_info *cdev_info)
134 {
135 	struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
136 	int err;
137 
138 	rf->sc_dev.hw = &rf->hw;
139 	rf->cdev = cdev_info;
140 	rf->pcidev = cdev_info->pdev;
141 	rf->hw.device = &rf->pcidev->dev;
142 	rf->msix_count = idc_priv->msix_count;
143 	rf->msix_entries = idc_priv->msix_entries;
144 
145 	err = ig3rdma_vchnl_init(rf, cdev_info, &rf->rdma_ver);
146 	if (err)
147 		return err;
148 
149 	err = ig3rdma_cfg_regions(&rf->hw, cdev_info);
150 	if (err) {
151 		destroy_workqueue(rf->vchnl_wq);
152 		return err;
153 	}
154 
155 	rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
156 	rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
157 	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
158 	rf->gen_ops.request_reset = ig3rdma_request_reset;
159 	rf->limits_sel = 7;
160 	mutex_init(&rf->ah_tbl_lock);
161 
162 	return 0;
163 }
164 
/**
 * ig3rdma_core_probe - Probe the "core" auxiliary device
 * @aux_dev: auxiliary device created by the IDPF core driver
 * @id: matched entry from ig3rdma_core_auxiliary_id_table
 *
 * Allocates and configures the RDMA PCI function, initializes the control
 * HW, then asks the core driver to create the per-vport devices. Errors
 * unwind in reverse order of the steps above.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ig3rdma_core_probe(struct auxiliary_device *aux_dev,
			      const struct auxiliary_device_id *id)
{
	struct iidc_rdma_core_auxiliary_dev *idc_adev =
		container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
	struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
	struct irdma_pci_f *rf;
	int err;

	rf = kzalloc(sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	err = ig3rdma_cfg_rf(rf, cdev_info);
	if (err)
		goto err_cfg_rf;

	err = irdma_ctrl_init_hw(rf);
	if (err)
		goto err_ctrl_init;

	/* Must be set before vport devices probe: their handlers (and the
	 * core event handler) look it up via auxiliary_get_drvdata().
	 */
	auxiliary_set_drvdata(aux_dev, rf);

	err = idpf_idc_vport_dev_ctrl(cdev_info, true);
	if (err)
		goto err_vport_ctrl;

	return 0;

err_vport_ctrl:
	irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
	ig3rdma_decfg_rf(rf);
err_cfg_rf:
	kfree(rf);

	return err;
}
203 
/**
 * ig3rdma_core_remove - Remove the "core" auxiliary device
 * @aux_dev: auxiliary device being removed
 *
 * Mirrors ig3rdma_core_probe() in reverse: tear down the vport devices
 * first, then the control HW, then the rf configuration and allocation.
 */
static void ig3rdma_core_remove(struct auxiliary_device *aux_dev)
{
	struct iidc_rdma_core_auxiliary_dev *idc_adev =
		container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
	struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
	struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_dev);

	idpf_idc_vport_dev_ctrl(cdev_info, false);
	irdma_ctrl_deinit_hw(rf);
	ig3rdma_decfg_rf(rf);
	kfree(rf);
}
216 
/* Match the "core" auxiliary device exposed by the IDPF driver
 * (<module>.<vendor>.rdma.core).
 */
static const struct auxiliary_device_id ig3rdma_core_auxiliary_id_table[] = {
	{.name = "idpf.8086.rdma.core", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, ig3rdma_core_auxiliary_id_table);
223 
/* Core auxiliary driver registered with the IDPF driver; event_handler
 * receives asynchronous core events (e.g. reset warnings) in addition to
 * the standard auxiliary probe/remove callbacks.
 */
struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv = {
	.adrv = {
		.name = "core",
		.id_table = ig3rdma_core_auxiliary_id_table,
		.probe = ig3rdma_core_probe,
		.remove = ig3rdma_core_remove,
	},
	.event_handler = ig3rdma_idc_core_event_handler,
};
233