Lines Matching full:dsa
142 struct dsa_state *dsa = to_dsa_state(device); in dsa_wq_init() local
143 union wq_cap_reg wq_cap = dsa->wq_cap; in dsa_wq_init()
159 .max_xfer_shift = dsa->gen_cap.max_xfer_shift, in dsa_wq_init()
160 .max_batch_shift = dsa->gen_cap.max_batch_shift, in dsa_wq_init()
167 writel(wqcfg.bits[i], dsa->wqcfg_table + offsetof(union wqcfg, bits[i])); in dsa_wq_init()
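The dsa_wq_init() matches above show the cached WQ capability feeding a locally built configuration that is then written into the device's WQCFG table one 32-bit word at a time. A minimal sketch of that write pattern, assuming a simplified union wqcfg stand-in (the real layout is the bitfield union from the IDXD register definitions) and an already-mapped wqcfg_table pointer:

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for the IDXD WQCFG layout; the real one is a bitfield union. */
union wqcfg {
	struct {
		uint32_t wq_size;
		uint32_t wq_thresh;
		/* ...remaining WQCFG fields elided... */
		uint32_t max_xfer_shift;
		uint32_t max_batch_shift;
	};
	uint32_t bits[8];
};

/* 32-bit MMIO write; the selftest library has its own writel() helper. */
static inline void writel(uint32_t val, volatile void *addr)
{
	*(volatile uint32_t *)addr = val;
}

/* Copy a locally built WQ configuration into the device's WQCFG table. */
static void wq_config_write(volatile uint8_t *wqcfg_table, const union wqcfg *wqcfg)
{
	for (size_t i = 0; i < sizeof(wqcfg->bits) / sizeof(wqcfg->bits[0]); i++)
		writel(wqcfg->bits[i], wqcfg_table + i * sizeof(uint32_t));
}
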
172 struct dsa_state *dsa = to_dsa_state(device); in dsa_group_init() local
173 union group_cap_reg group_cap = dsa->group_cap; in dsa_group_init()
174 union engine_cap_reg engine_cap = dsa->engine_cap; in dsa_group_init()
180 writeq(1, dsa->grpcfg_table + offsetof(struct grpcfg, wqs[0])); in dsa_group_init()
181 writeq(1, dsa->grpcfg_table + offsetof(struct grpcfg, engines)); in dsa_group_init()
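dsa_group_init() pairs work queue 0 with engine 0 by writing single-bit masks into the group configuration table. A hedged sketch of that pairing; struct grpcfg is reduced here to the bitmaps that matter:

#include <stdint.h>
#include <stddef.h>

/* Reduced stand-in for an IDXD group configuration entry. */
struct grpcfg {
	uint64_t wqs[4];	/* bitmap of WQs assigned to this group */
	uint64_t engines;	/* bitmap of engines assigned to this group */
	uint64_t flags;
};

/* 64-bit MMIO write; the selftest library has its own writeq() helper. */
static inline void writeq(uint64_t val, volatile void *addr)
{
	*(volatile uint64_t *)addr = val;
}

static void group_config_write(volatile uint8_t *grpcfg_table)
{
	/* Bit 0 of each bitmap: WQ 0 and engine 0 both land in group 0. */
	writeq(1, grpcfg_table + offsetof(struct grpcfg, wqs[0]));
	writeq(1, grpcfg_table + offsetof(struct grpcfg, engines));
}
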
186 struct dsa_state *dsa = to_dsa_state(device); in dsa_register_cache_init() local
189 dsa->gen_cap.bits = readq(bar0 + IDXD_GENCAP_OFFSET); in dsa_register_cache_init()
190 dsa->wq_cap.bits = readq(bar0 + IDXD_WQCAP_OFFSET); in dsa_register_cache_init()
191 dsa->group_cap.bits = readq(bar0 + IDXD_GRPCAP_OFFSET); in dsa_register_cache_init()
192 dsa->engine_cap.bits = readq(bar0 + IDXD_ENGCAP_OFFSET); in dsa_register_cache_init()
194 dsa->table_offsets.bits[0] = readq(bar0 + IDXD_TABLE_OFFSET); in dsa_register_cache_init()
195 dsa->table_offsets.bits[1] = readq(bar0 + IDXD_TABLE_OFFSET + 8); in dsa_register_cache_init()
197 dsa->wqcfg_table = bar0 + dsa->table_offsets.wqcfg * IDXD_TABLE_MULT; in dsa_register_cache_init()
198 dsa->grpcfg_table = bar0 + dsa->table_offsets.grpcfg * IDXD_TABLE_MULT; in dsa_register_cache_init()
200 dsa->max_batches = 1U << (dsa->wq_cap.total_wq_size + IDXD_WQCFG_MIN); in dsa_register_cache_init()
201 dsa->max_batches = min(dsa->max_batches, ARRAY_SIZE(dsa->batch)); in dsa_register_cache_init()
203 dsa->max_copies_per_batch = 1UL << dsa->gen_cap.max_batch_shift; in dsa_register_cache_init()
204 dsa->max_copies_per_batch = min(dsa->max_copies_per_batch, ARRAY_SIZE(dsa->copy)); in dsa_register_cache_init()
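dsa_register_cache_init() snapshots the capability registers from BAR0, then uses the table-offset register to find the WQ and group configuration tables; the stored offsets are multiples of a fixed granule (IDXD_TABLE_MULT). A sketch of that address computation, with placeholder register constants standing in for the real IDXD_* definitions and an assumed field layout for the offsets register:

#include <stdint.h>

/* Placeholder values for illustration; the real ones come from the IDXD register map. */
#define TABLE_OFFSET_REG	0x60
#define TABLE_MULT		0x100

static inline uint64_t readq(const volatile void *addr)
{
	return *(const volatile uint64_t *)addr;
}

/* Derive pointers to the GRPCFG and WQCFG tables from the table-offsets register. */
static void locate_config_tables(uint8_t *bar0,
				 volatile uint8_t **wqcfg_table,
				 volatile uint8_t **grpcfg_table)
{
	uint64_t offsets = readq(bar0 + TABLE_OFFSET_REG);
	uint64_t grpcfg_off = offsets & 0xffff;		/* assumed: bits 15:0  hold the GRPCFG offset */
	uint64_t wqcfg_off = (offsets >> 16) & 0xffff;	/* assumed: bits 31:16 hold the WQCFG offset */

	*grpcfg_table = bar0 + grpcfg_off * TABLE_MULT;
	*wqcfg_table = bar0 + wqcfg_off * TABLE_MULT;
}
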
209 struct dsa_state *dsa = to_dsa_state(device); in dsa_init() local
211 VFIO_ASSERT_GE(device->driver.region.size, sizeof(*dsa)); in dsa_init()
230 dsa->max_batches * dsa->max_copies_per_batch; in dsa_init()
231 device->driver.max_memcpy_size = 1UL << dsa->gen_cap.max_xfer_shift; in dsa_init()
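dsa_init() then advertises the combined limits to the test harness: at most max_batches * max_copies_per_batch copies per request, each moving at most 2^max_xfer_shift bytes. A tiny worked example with made-up capability values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values, chosen only to illustrate the arithmetic. */
	unsigned long max_batches = 32;		 /* capped at ARRAY_SIZE(dsa->batch) */
	unsigned long max_copies_per_batch = 16; /* capped at ARRAY_SIZE(dsa->copy)  */
	unsigned int max_xfer_shift = 31;

	printf("max_memcpy_count: %lu copies\n", max_batches * max_copies_per_batch); /* 512 */
	printf("max_memcpy_size:  %lu bytes\n", 1UL << max_xfer_shift);               /* 2 GiB */
	return 0;
}
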
268 struct dsa_state *dsa = to_dsa_state(device); in dsa_copy_desc_init() local
283 .completion_addr = to_iova(device, &dsa->copy_completion), in dsa_copy_desc_init()
292 struct dsa_state *dsa = to_dsa_state(device); in dsa_batch_desc_init() local
298 .completion_addr = to_iova(device, &dsa->batch_completion), in dsa_batch_desc_init()
299 .desc_list_addr = to_iova(device, &dsa->copy[0]), in dsa_batch_desc_init()
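dsa_copy_desc_init() and dsa_batch_desc_init() fill in hardware descriptors whose layout comes from the UAPI header <linux/idxd.h>: a memmove descriptor carries source, destination, transfer size and a completion-record IOVA, while a batch descriptor points at an array of such copy descriptors. A hedged sketch of both, using the UAPI opcode and flag names; the helper signatures and IOVA arguments are illustrative, not the selftest's exact ones:

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/idxd.h>		/* struct dsa_hw_desc, DSA_OPCODE_*, IDXD_OP_FLAG_* */

/* Build a single memmove (copy) descriptor. All addresses are device IOVAs. */
static void copy_desc_init(struct dsa_hw_desc *desc, uint64_t src_iova,
			   uint64_t dst_iova, uint32_t size,
			   uint64_t completion_iova, bool interrupt)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = DSA_OPCODE_MEMMOVE;
	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;	/* write a completion record */
	if (interrupt)
		desc->flags |= IDXD_OP_FLAG_RCI;		/* also raise an interrupt */
	desc->src_addr = src_iova;
	desc->dst_addr = dst_iova;
	desc->xfer_size = size;
	desc->completion_addr = completion_iova;
}

/* Build a batch descriptor covering nr_copies contiguous copy descriptors. */
static void batch_desc_init(struct dsa_hw_desc *desc, uint64_t desc_list_iova,
			    uint32_t nr_copies, uint64_t completion_iova)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = DSA_OPCODE_BATCH;
	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	desc->desc_list_addr = desc_list_iova;
	desc->desc_count = nr_copies;
	desc->completion_addr = completion_iova;
}
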
313 struct dsa_state *dsa = to_dsa_state(device); in dsa_memcpy_one() local
315 memset(&dsa->copy_completion, 0, sizeof(dsa->copy_completion)); in dsa_memcpy_one()
317 dsa_copy_desc_init(device, &dsa->copy[0], src, dst, size, interrupt); in dsa_memcpy_one()
318 dsa_desc_write(device, &dsa->copy[0]); in dsa_memcpy_one()
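dsa_memcpy_one() clears the completion record, builds a single copy descriptor and hands it to the device. On a dedicated work queue a 64-byte descriptor is normally submitted with one MOVDIR64B store to the WQ portal; a sketch of such a submission helper, showing the general DSA pattern rather than necessarily the selftest's exact dsa_desc_write():

#include <linux/idxd.h>

/*
 * Submit one 64-byte descriptor to a dedicated WQ portal. MOVDIR64B issues
 * the whole descriptor as a single 64-byte store, which dedicated WQs
 * require; shared WQs would use ENQCMD instead.
 */
static void desc_submit(volatile void *wq_portal, const struct dsa_hw_desc *desc)
{
	/* movdir64b (%rdx), %rax - encoded as raw bytes for older assemblers. */
	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
		     : : "a" (wq_portal), "d" (desc), "m" (*desc)
		     : "memory");
}
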
324 struct dsa_state *dsa = to_dsa_state(device); in dsa_memcpy_batch() local
327 memset(&dsa->batch_completion, 0, sizeof(dsa->batch_completion)); in dsa_memcpy_batch()
329 for (i = 0; i < ARRAY_SIZE(dsa->copy); i++) { in dsa_memcpy_batch()
330 struct dsa_hw_desc *copy_desc = &dsa->copy[i]; in dsa_memcpy_batch()
338 for (i = 0; i < ARRAY_SIZE(dsa->batch) && count; i++) { in dsa_memcpy_batch()
339 struct dsa_hw_desc *batch_desc = &dsa->batch[i]; in dsa_memcpy_batch()
342 nr_copies = min(count, dsa->max_copies_per_batch); in dsa_memcpy_batch()
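dsa_memcpy_batch() splits a large request across the descriptor arrays: batch descriptors are built over runs of at most max_copies_per_batch copies until the requested count is consumed. A simplified sketch of that chunking loop; the array size and helper name are illustrative:

#include <stdint.h>

#define NR_BATCH_DESCS	32	/* illustrative; the selftest sizes its arrays itself */

/*
 * Carve `count` copy operations into batches of at most `max_per_batch`
 * each. Returns the number of batch descriptors needed, or -1 if the
 * request needs more batches than the descriptor array can hold.
 */
static int plan_batches(uint64_t count, uint64_t max_per_batch)
{
	int batches = 0;

	while (count) {
		uint64_t nr_copies = count < max_per_batch ? count : max_per_batch;

		if (batches == NR_BATCH_DESCS)
			return -1;

		/* A full implementation points batch[batches] at nr_copies copy descriptors here. */
		batches++;
		count -= nr_copies;
	}

	return batches;
}
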
370 struct dsa_state *dsa = to_dsa_state(device); in dsa_memcpy_start() local
372 /* DSA devices require at least 2 copies per batch. */ in dsa_memcpy_start()
378 dsa->memcpy_count = count; in dsa_memcpy_start()
383 struct dsa_state *dsa = to_dsa_state(device); in dsa_memcpy_wait() local
386 if (dsa->memcpy_count == 1) in dsa_memcpy_wait()
387 r = dsa_completion_wait(device, &dsa->copy_completion); in dsa_memcpy_wait()
389 r = dsa_completion_wait(device, &dsa->batch_completion); in dsa_memcpy_wait()
391 dsa->memcpy_count = 0; in dsa_memcpy_wait()
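The start/wait split keeps the memcpy asynchronous: dsa_memcpy_start() only records how many copies were submitted. dsa_memcpy_wait() then polls the matching completion record, the single-copy record when exactly one copy was issued and the batch record otherwise. Because completion records are written by the device, waiting is a spin on the status byte; a minimal polling sketch, assuming the UAPI completion-record layout and DSA_COMP_SUCCESS as the terminal success status:

#include <errno.h>
#include <linux/idxd.h>		/* struct dsa_completion_record, DSA_COMP_SUCCESS */

/*
 * Spin until the device fills in the completion record. The record was
 * zeroed before submission, so any non-zero status means the operation
 * has finished, successfully or not.
 */
static int completion_wait(volatile struct dsa_completion_record *comp)
{
	while (!comp->status)
		;	/* a real test would bound this with a timeout */

	return comp->status == DSA_COMP_SUCCESS ? 0 : -EIO;
}
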
398 struct dsa_state *dsa = to_dsa_state(device); in dsa_send_msi() local
401 to_iova(device, &dsa->send_msi_src), in dsa_send_msi()
402 to_iova(device, &dsa->send_msi_dst), in dsa_send_msi()
403 sizeof(dsa->send_msi_src), true); in dsa_send_msi()
405 VFIO_ASSERT_EQ(dsa_completion_wait(device, &dsa->copy_completion), 0); in dsa_send_msi()
409 .name = "dsa",