Lines matching full:area, i.e. cross-reference hits for the identifier "area" in the io_uring zero-copy receive (zcrx) code. Each entry shows the source line number, the matching line, and the containing function, with "local"/"argument" marking how the identifier is used there.

48 struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); in io_zcrx_iov_page() local
51 lockdep_assert(!area->mem.is_dmabuf); in io_zcrx_iov_page()
53 niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT; in io_zcrx_iov_page()
54 return area->mem.pages[net_iov_idx(niov) << niov_pages_shift]; in io_zcrx_iov_page()
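Stitched together from the matches above (source lines 48-54), io_zcrx_iov_page() maps a net_iov back to its backing page. A sketch; the function signature and the niov_pages_shift declaration are inferred rather than taken from the matches:

static struct page *io_zcrx_iov_page(const struct net_iov *niov)
{
        struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
        unsigned niov_pages_shift;

        /* page-backed areas only; dmabuf areas have no struct page array */
        lockdep_assert(!area->mem.is_dmabuf);

        /* one niov may span several pages, so scale the niov index up */
        niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
        return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
}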
58 struct io_zcrx_area *area) in io_populate_area_dma() argument
61 struct sg_table *sgt = area->mem.sgt; in io_populate_area_dma()
72 while (sg_len && niov_idx < area->nia.num_niovs) { in io_populate_area_dma()
73 struct net_iov *niov = &area->nia.niovs[niov_idx]; in io_populate_area_dma()
83 if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs)) in io_populate_area_dma()
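A rough reconstruction of io_populate_area_dma() from source lines 58-83: it walks the area's DMA-mapped scatterlist and hands each niov its address. The outer sg iteration and the per-niov assignment did not match "area" and are assumptions (the assignment presumably uses net_mp_niov_set_dma_addr(), judging by the unmap path below):

static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
                                struct io_zcrx_area *area)
{
        struct sg_table *sgt = area->mem.sgt;
        struct scatterlist *sg;
        unsigned niov_idx = 0;
        int i;

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma = sg_dma_address(sg);
                unsigned long sg_len = sg_dma_len(sg);

                while (sg_len && niov_idx < area->nia.num_niovs) {
                        struct net_iov *niov = &area->nia.niovs[niov_idx];

                        /* assumed (not in the matches): record dma in the niov,
                         * e.g. net_mp_niov_set_dma_addr(niov, dma), then advance
                         * dma and shrink sg_len by the per-niov size
                         */
                        niov_idx++;
                }
        }
        /* every niov must have received an address */
        if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
                return -EFAULT;
        return 0;
}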
253 struct io_zcrx_area *area) in io_zcrx_unmap_area() argument
258 if (!area->is_mapped) in io_zcrx_unmap_area()
260 area->is_mapped = false; in io_zcrx_unmap_area()
262 for (i = 0; i < area->nia.num_niovs; i++) in io_zcrx_unmap_area()
263 net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0); in io_zcrx_unmap_area()
265 if (area->mem.is_dmabuf) { in io_zcrx_unmap_area()
266 io_release_dmabuf(&area->mem); in io_zcrx_unmap_area()
268 dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table, in io_zcrx_unmap_area()
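The unmap path is almost fully visible in the matches (source lines 253-268). A sketch, with the truncated trailing arguments of dma_unmap_sgtable() filled in as an assumption:

static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
                               struct io_zcrx_area *area)
{
        int i;

        if (!area->is_mapped)
                return;
        area->is_mapped = false;

        /* clear the DMA addresses handed out by io_populate_area_dma() */
        for (i = 0; i < area->nia.num_niovs; i++)
                net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);

        if (area->mem.is_dmabuf) {
                io_release_dmabuf(&area->mem);
        } else {
                /* direction and attrs assumed; the match is cut off here */
                dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
                                  DMA_FROM_DEVICE, IO_DMA_ATTR);
        }
}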
273 static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) in io_zcrx_map_area() argument
278 if (area->is_mapped) in io_zcrx_map_area()
281 if (!area->mem.is_dmabuf) { in io_zcrx_map_area()
282 ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table, in io_zcrx_map_area()
288 ret = io_populate_area_dma(ifq, area); in io_zcrx_map_area()
290 area->is_mapped = true; in io_zcrx_map_area()
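The matching map side (source lines 273-290): page-backed areas get their sg_table DMA-mapped first (dmabuf areas presumably already have DMA addresses from when the attachment was mapped at import time), then both kinds run io_populate_area_dma(). Error handling and the DMA direction/attrs are assumptions:

static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
        int ret;

        if (area->is_mapped)
                return 0;

        if (!area->mem.is_dmabuf) {
                ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
                                      DMA_FROM_DEVICE, IO_DMA_ATTR);
                if (ret < 0)
                        return ret;
        }

        ret = io_populate_area_dma(ifq, area);
        if (!ret)
                area->is_mapped = true;
        return ret;
}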
327 struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); in io_get_user_counter() local
329 return &area->user_refs[net_iov_idx(niov)]; in io_get_user_counter()
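io_get_user_counter() (source lines 327-329) just indexes the per-area array of user reference counters; only the signature is inferred:

static atomic_t *io_get_user_counter(struct net_iov *niov)
{
        struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

        return &area->user_refs[net_iov_idx(niov)];
}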
386 static void io_zcrx_free_area(struct io_zcrx_area *area) in io_zcrx_free_area() argument
388 io_zcrx_unmap_area(area->ifq, area); in io_zcrx_free_area()
389 io_release_area_mem(&area->mem); in io_zcrx_free_area()
391 if (area->mem.account_pages) in io_zcrx_free_area()
392 io_unaccount_mem(area->ifq->ctx, area->mem.account_pages); in io_zcrx_free_area()
394 kvfree(area->freelist); in io_zcrx_free_area()
395 kvfree(area->nia.niovs); in io_zcrx_free_area()
396 kvfree(area->user_refs); in io_zcrx_free_area()
397 kfree(area); in io_zcrx_free_area()
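The teardown path (source lines 386-397) is fully covered by the matches and needs no guessing beyond the braces:

static void io_zcrx_free_area(struct io_zcrx_area *area)
{
        io_zcrx_unmap_area(area->ifq, area);
        io_release_area_mem(&area->mem);

        /* drop the memory accounting taken when the pages were imported */
        if (area->mem.account_pages)
                io_unaccount_mem(area->ifq->ctx, area->mem.account_pages);

        kvfree(area->freelist);
        kvfree(area->nia.niovs);
        kvfree(area->user_refs);
        kfree(area);
}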
401 struct io_zcrx_area *area) in io_zcrx_append_area() argument
403 if (ifq->area) in io_zcrx_append_area()
405 ifq->area = area; in io_zcrx_append_area()
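io_zcrx_append_area() (source lines 401-405) enforces the one-area-per-ifq limit noted later in io_zcrx_create_area(); the error code returned when an area is already installed is a guess:

static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
                               struct io_zcrx_area *area)
{
        if (ifq->area)
                return -EINVAL;         /* assumed error code */
        ifq->area = area;
        return 0;
}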
412 struct io_zcrx_area *area; in io_zcrx_create_area() local
417 area = kzalloc(sizeof(*area), GFP_KERNEL); in io_zcrx_create_area()
418 if (!area) in io_zcrx_create_area()
420 area->ifq = ifq; in io_zcrx_create_area()
422 ret = io_import_area(ifq, &area->mem, area_reg); in io_zcrx_create_area()
427 nr_iovs = area->mem.size >> ifq->niov_shift; in io_zcrx_create_area()
428 area->nia.num_niovs = nr_iovs; in io_zcrx_create_area()
431 area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]), in io_zcrx_create_area()
433 if (!area->nia.niovs) in io_zcrx_create_area()
436 area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]), in io_zcrx_create_area()
438 if (!area->freelist) in io_zcrx_create_area()
441 area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]), in io_zcrx_create_area()
443 if (!area->user_refs) in io_zcrx_create_area()
447 struct net_iov *niov = &area->nia.niovs[i]; in io_zcrx_create_area()
449 niov->owner = &area->nia; in io_zcrx_create_area()
450 area->freelist[i] = i; in io_zcrx_create_area()
451 atomic_set(&area->user_refs[i], 0); in io_zcrx_create_area()
455 area->free_count = nr_iovs; in io_zcrx_create_area()
456 /* we're only supporting one area per ifq for now */ in io_zcrx_create_area()
457 area->area_id = 0; in io_zcrx_create_area()
458 area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT; in io_zcrx_create_area()
459 spin_lock_init(&area->freelist_lock); in io_zcrx_create_area()
461 ret = io_zcrx_append_area(ifq, area); in io_zcrx_create_area()
465 if (area) in io_zcrx_create_area()
466 io_zcrx_free_area(area); in io_zcrx_create_area()
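Putting the io_zcrx_create_area() matches (source lines 412-466) back together gives the allocation sequence below. The goto labels, error codes, GFP flags, and any per-niov initialisation that does not mention "area" are filled in as assumptions:

static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
                               struct io_uring_zcrx_area_reg *area_reg)
{
        struct io_zcrx_area *area;
        unsigned nr_iovs;
        int i, ret;

        ret = -ENOMEM;                          /* assumed */
        area = kzalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                goto err;
        area->ifq = ifq;

        ret = io_import_area(ifq, &area->mem, area_reg);
        if (ret)
                goto err;

        /* one niov per (1 << niov_shift) bytes of the registered buffer */
        nr_iovs = area->mem.size >> ifq->niov_shift;
        area->nia.num_niovs = nr_iovs;

        ret = -ENOMEM;
        area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
                                         GFP_KERNEL | __GFP_ZERO);     /* flags assumed */
        if (!area->nia.niovs)
                goto err;

        area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
                                        GFP_KERNEL | __GFP_ZERO);
        if (!area->freelist)
                goto err;

        area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
                                         GFP_KERNEL | __GFP_ZERO);
        if (!area->user_refs)
                goto err;

        for (i = 0; i < nr_iovs; i++) {
                struct net_iov *niov = &area->nia.niovs[i];

                niov->owner = &area->nia;
                area->freelist[i] = i;
                atomic_set(&area->user_refs[i], 0);
                /* any further per-niov setup did not match and is elided */
        }

        area->free_count = nr_iovs;
        /* we're only supporting one area per ifq for now */
        area->area_id = 0;
        area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
        spin_lock_init(&area->freelist_lock);

        ret = io_zcrx_append_area(ifq, area);
        if (!ret)
                return 0;
err:
        if (area)
                io_zcrx_free_area(area);
        return ret;
}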
524 if (ifq->area) in io_zcrx_ifq_free()
525 io_zcrx_free_area(ifq->area); in io_zcrx_ifq_free()
548 struct io_uring_zcrx_area_reg area; in io_register_zcrx_ifq() local
583 if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area))) in io_register_zcrx_ifq()
616 ret = io_zcrx_create_area(ifq, &area); in io_register_zcrx_ifq()
638 copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) { in io_register_zcrx_ifq()
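Within io_register_zcrx_ifq() (source lines 548-638) the area registration round-trips through userspace memory: the struct io_uring_zcrx_area_reg is copied in from reg.area_ptr, io_zcrx_create_area() fills in rq_area_token, and the updated struct is copied back so userspace learns the token. Everything between those steps, and the exact error paths, is elided or assumed here:

        struct io_uring_zcrx_area_reg area;

        /* ... copy and validate the main registration struct ... */
        if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
                return -EFAULT;                 /* error path assumed */

        /* ... allocate the ifq and bind it to the netdev rx queue ... */
        ret = io_zcrx_create_area(ifq, &area);
        if (ret)
                goto err;                       /* label assumed */

        /* ... publish ring offsets back to userspace ... */
        if (copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
                /* userspace never learned rq_area_token; unwind */
                ret = -EFAULT;
                goto err;
        }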
673 static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area) in __io_zcrx_get_free_niov() argument
677 lockdep_assert_held(&area->freelist_lock); in __io_zcrx_get_free_niov()
679 niov_idx = area->freelist[--area->free_count]; in __io_zcrx_get_free_niov()
680 return &area->nia.niovs[niov_idx]; in __io_zcrx_get_free_niov()
685 struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); in io_zcrx_return_niov_freelist() local
687 spin_lock_bh(&area->freelist_lock); in io_zcrx_return_niov_freelist()
688 area->freelist[area->free_count++] = net_iov_idx(niov); in io_zcrx_return_niov_freelist()
689 spin_unlock_bh(&area->freelist_lock); in io_zcrx_return_niov_freelist()
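The freelist is a plain LIFO index stack guarded by freelist_lock; both halves (source lines 673-689) are nearly complete in the matches:

/* pop: caller already holds area->freelist_lock */
static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
{
        unsigned niov_idx;

        lockdep_assert_held(&area->freelist_lock);

        niov_idx = area->freelist[--area->free_count];
        return &area->nia.niovs[niov_idx];
}

/* push: takes the lock itself, _bh because it may run from softirq context */
static void io_zcrx_return_niov_freelist(struct net_iov *niov)
{
        struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

        spin_lock_bh(&area->freelist_lock);
        area->freelist[area->free_count++] = net_iov_idx(niov);
        spin_unlock_bh(&area->freelist_lock);
}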
706 struct io_zcrx_area *area = ifq->area; in io_zcrx_scrub() local
709 if (!area) in io_zcrx_scrub()
713 for (i = 0; i < area->nia.num_niovs; i++) { in io_zcrx_scrub()
714 struct net_iov *niov = &area->nia.niovs[i]; in io_zcrx_scrub()
759 struct io_zcrx_area *area; in io_parse_rqe() local
766 area = ifq->area; in io_parse_rqe()
768 if (unlikely(niov_idx >= area->nia.num_niovs)) in io_parse_rqe()
770 niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs); in io_parse_rqe()
772 *ret_niov = &area->nia.niovs[niov_idx]; in io_parse_rqe()
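io_parse_rqe() (source lines 759-772) validates a refill-queue entry against the (single) area before handing back a niov pointer; note the array_index_nospec() after the bounds check. The signature and the decoding of niov_idx from the entry are assumptions consistent with the offset encoding used on the completion side below:

static bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe, struct io_zcrx_ifq *ifq,
                         struct net_iov **ret_niov)
{
        struct io_zcrx_area *area;
        unsigned niov_idx;

        /* assumed decode: strip the area token bits, scale by niov size */
        niov_idx = (rqe->off & ((1ULL << IORING_ZCRX_AREA_SHIFT) - 1)) >>
                   ifq->niov_shift;
        area = ifq->area;

        if (unlikely(niov_idx >= area->nia.num_niovs))
                return false;
        /* clamp under speculation before using it as an index */
        niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);

        *ret_niov = &area->nia.niovs[niov_idx];
        return true;
}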
817 struct io_zcrx_area *area = ifq->area; in io_zcrx_refill_slow() local
819 spin_lock_bh(&area->freelist_lock); in io_zcrx_refill_slow()
820 while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) { in io_zcrx_refill_slow()
821 struct net_iov *niov = __io_zcrx_get_free_niov(area); in io_zcrx_refill_slow()
828 spin_unlock_bh(&area->freelist_lock); in io_zcrx_refill_slow()
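io_zcrx_refill_slow() (source lines 817-828) tops up the page_pool's allocation cache from the area freelist while holding freelist_lock; how each niov is handed to the pool did not match "area" and is only described in a comment:

static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
{
        struct io_zcrx_area *area = ifq->area;

        spin_lock_bh(&area->freelist_lock);
        while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
                struct net_iov *niov = __io_zcrx_get_free_niov(area);

                /* elided: take a page_pool reference on niov and append it to
                 * pp->alloc.cache (which is what advances pp->alloc.count) */
        }
        spin_unlock_bh(&area->freelist_lock);
}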
879 ret = io_zcrx_map_area(ifq, ifq->area); in io_pp_zc_init()
915 if (ifq->area) in io_pp_uninstall()
916 io_zcrx_unmap_area(ifq, ifq->area); in io_pp_uninstall()
936 struct io_zcrx_area *area; in io_zcrx_queue_cqe() local
949 area = io_zcrx_iov_to_area(niov); in io_zcrx_queue_cqe()
952 rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT); in io_zcrx_queue_cqe()
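The completion side (source lines 936-952) encodes which area the data landed in into the upper bits of the CQE offset: rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT). A userspace consumer would split that value back apart roughly as below; zcrx_decode_off is a hypothetical helper, while IORING_ZCRX_AREA_SHIFT comes from the uapi io_uring header:

/* userspace sketch: split a zcrx completion offset back into its parts */
static inline void zcrx_decode_off(uint64_t off, uint32_t *area_id,
                                   uint64_t *data_off)
{
        *area_id  = off >> IORING_ZCRX_AREA_SHIFT;
        *data_off = off & ((1ULL << IORING_ZCRX_AREA_SHIFT) - 1);
}

data_off is then a byte offset into the registered area's buffer.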
959 struct io_zcrx_area *area = ifq->area; in io_alloc_fallback_niov() local
962 if (area->mem.is_dmabuf) in io_alloc_fallback_niov()
965 spin_lock_bh(&area->freelist_lock); in io_alloc_fallback_niov()
966 if (area->free_count) in io_alloc_fallback_niov()
967 niov = __io_zcrx_get_free_niov(area); in io_alloc_fallback_niov()
968 spin_unlock_bh(&area->freelist_lock); in io_alloc_fallback_niov()
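Finally, the copy-fallback allocator (source lines 959-968) pulls straight from the freelist, but only for page-backed areas (presumably because the fallback copy needs struct pages to copy into); the early return and the NULL initialisation are inferred:

static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
{
        struct io_zcrx_area *area = ifq->area;
        struct net_iov *niov = NULL;

        /* dmabuf-backed areas cannot be used for the copy fallback */
        if (area->mem.is_dmabuf)
                return NULL;

        spin_lock_bh(&area->freelist_lock);
        if (area->free_count)
                niov = __io_zcrx_get_free_niov(area);
        spin_unlock_bh(&area->freelist_lock);
        return niov;
}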