init.c: diff between 47c16ac27d4cb664cee53ee0b9b7e2f907923fb3 (old) and 7c5dd23e57c14cf7177b8a5e0fd08916e0c60005 (new)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

--- 131 unchanged lines hidden ---

	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
 err_irq_entries:
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

+static int idxd_setup_wqs(struct idxd_device *idxd)
+{
+	struct device *dev = &idxd->pdev->dev;
+	struct idxd_wq *wq;
+	int i, rc;
+
+	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+				 GFP_KERNEL, dev_to_node(dev));
+	if (!idxd->wqs)
+		return -ENOMEM;
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+		if (!wq) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		wq->id = i;
+		wq->idxd = idxd;
+		device_initialize(&wq->conf_dev);
+		wq->conf_dev.parent = &idxd->conf_dev;
+		wq->conf_dev.bus = idxd_get_bus_type(idxd);
+		wq->conf_dev.type = &idxd_wq_device_type;
+		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+		if (rc < 0) {
+			put_device(&wq->conf_dev);
+			goto err;
+		}
+
+		mutex_init(&wq->wq_lock);
+		wq->idxd_cdev.minor = -1;
+		wq->max_xfer_bytes = idxd->max_xfer_bytes;
+		wq->max_batch_size = idxd->max_batch_size;
+		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+		if (!wq->wqcfg) {
+			put_device(&wq->conf_dev);
+			rc = -ENOMEM;
+			goto err;
+		}
+		idxd->wqs[i] = wq;
+	}
+
+	return 0;
+
+ err:
+	while (--i >= 0)
+		put_device(&idxd->wqs[i]->conf_dev);
+	return rc;
+}
+
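The unwind in the new idxd_setup_wqs() above is the usual allocate-N-or-roll-back idiom: a wq pointer is only stored into idxd->wqs[i] once every per-entry step has succeeded, so the err: path can walk back with while (--i >= 0) and release exactly the entries that were completed. Below is a minimal user-space sketch of the same idiom, not driver code; the names (struct item, setup_items) are illustrative assumptions.

/*
 * Allocate-or-unwind sketch: allocate a pointer array, then each element;
 * on any failure, release only the elements created so far.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
	int id;
};

static int setup_items(struct item ***out, int n)
{
	struct item **items;
	int i;

	items = calloc(n, sizeof(*items));
	if (!items)
		return -1;

	for (i = 0; i < n; i++) {
		struct item *it = calloc(1, sizeof(*it));

		if (!it)
			goto err;
		it->id = i;
		items[i] = it;	/* publish only after the entry is fully set up */
	}

	*out = items;
	return 0;

err:
	while (--i >= 0)	/* roll back the entries already set up */
		free(items[i]);
	free(items);
	return -1;
}

int main(void)
{
	struct item **items;
	int i;

	if (setup_items(&items, 4) == 0) {
		printf("first id: %d\n", items[0]->id);
		for (i = 0; i < 4; i++)
			free(items[i]);
		free(items);
	}
	return 0;
}

In the driver the roll-back calls put_device() on each wq->conf_dev rather than freeing directly, presumably leaving the actual kfree() to the conf_dev type's release callback once the last reference is dropped.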
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
-	int i;
+	int i, rc;

	init_waitqueue_head(&idxd->cmd_waitq);
+
+	rc = idxd_setup_wqs(idxd);
+	if (rc < 0)
+		return rc;
+
	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
-	if (!idxd->groups)
-		return -ENOMEM;
+	if (!idxd->groups) {
+		rc = -ENOMEM;
+		goto err;
+	}

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

-	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
-				 GFP_KERNEL);
-	if (!idxd->wqs)
-		return -ENOMEM;
-
	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
-	if (!idxd->engines)
-		return -ENOMEM;
+	if (!idxd->engines) {
+		rc = -ENOMEM;
+		goto err;
+	}

-	for (i = 0; i < idxd->max_wqs; i++) {
-		struct idxd_wq *wq = &idxd->wqs[i];
-
-		wq->id = i;
-		wq->idxd = idxd;
-		mutex_init(&wq->wq_lock);
-		wq->idxd_cdev.minor = -1;
-		wq->max_xfer_bytes = idxd->max_xfer_bytes;
-		wq->max_batch_size = idxd->max_batch_size;
-		wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
-		if (!wq->wqcfg)
-			return -ENOMEM;
-	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	idxd->wq = create_workqueue(dev_name(dev));
-	if (!idxd->wq)
-		return -ENOMEM;
+	if (!idxd->wq) {
+		rc = -ENOMEM;
+		goto err;
+	}

	return 0;
+
+ err:
+	for (i = 0; i < idxd->max_wqs; i++)
+		put_device(&idxd->wqs[i]->conf_dev);
+	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);

--- 158 unchanged lines hidden ---

		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
-		goto err_setup;
+		goto err;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
-		goto err_setup;
+		goto err;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

- err_setup:
+ err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	return rc;
}

static void idxd_type_init(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)

--- 211 unchanged lines hidden ---