/linux/drivers/infiniband/sw/rxe/
rxe_mr.c (matches in mr_check_range() and rxe_mr_init())
    27  int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
    29          switch (mr->ibmr.type) {
    35          if (iova < mr->ibmr.iova ||
    36              iova + length > mr->ibmr.iova + mr->ibmr.length) {
    37                  rxe_dbg_mr(mr, "iova/length out of range\n");
    43          rxe_dbg_mr(mr, "mr type not supported\n");
    48  void rxe_mr_init(int access, struct rxe_mr *mr)
    50          u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);
    57          mr->lkey = mr->ibmr.lkey = key;
    58          mr->rkey = mr->ibmr.rkey = key;
        [all …]

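rxe_mr_init() above composes each lkey/rkey from two parts: the MR's object-table index in the upper bits and a rolling 8-bit value in the low byte (taken from rxe_get_next_key() in the real code). A minimal standalone sketch of that layout; the helper names here are invented:

    #include <stdint.h>

    /* Key layout as in rxe_mr_init(): index in the upper 24 bits, an 8-bit
     * rolling variant in the low byte, so a recycled index still yields a
     * fresh key. */
    static uint32_t make_key(uint32_t index, uint8_t variant)
    {
            return (index << 8) | variant;
    }

    static uint32_t key_index(uint32_t key)    { return key >> 8; }
    static uint8_t  key_variant(uint32_t key)  { return key & 0xff; }
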
rxe_odp.c (matches in the rxe_odp_* helpers; scope noted per line where needed)
    44  static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcnt, u32 flags)
    46          struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
    64  static int rxe_odp_init_pages(struct rxe_mr *mr)
    66          struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
    69          ret = rxe_odp_do_pagefault_and_lock(mr, mr->umem->address,
    70                                              mr->umem->length,
    80          u64 iova, int access_flags, struct rxe_mr *mr)    (in rxe_odp_mr_init_user())
    88          rxe_mr_init(access_flags, mr);
   102          rxe_dbg_mr(mr, "Unabl    (in rxe_odp_mr_init_user())
   160  rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
   189  __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, enum rxe_mr_copy_dir dir)
   225  rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, enum rxe_mr_copy_dir dir)
   261  rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, u64 compare, u64 swap_add, u64 *orig_val)
   312  rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, u64 compare, u64 swap_add, u64 *orig_val)
   330  rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
   368  rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
   416  struct rxe_mr *mr;    (struct member)
   466  struct rxe_mr *mr;    (local in rxe_ib_prefetch_sg_list())
   510  struct rxe_mr *mr;    (local in rxe_ib_advise_mr_prefetch())
        [all …]

rxe_mw.c (matches in rxe_check_bind_mw() and rxe_do_bind_mw())
    51          struct rxe_mw *mw, struct rxe_mr *mr, int access)
    83          if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
    91          if (!mr)
    94          if (unlikely(mr->access & IB_ZERO_BASED)) {
   100          if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
   109              !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
   117          if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
   123          if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
   125               (mr->ibmr.iova + mr->ibmr.length)))) {
   136          struct rxe_mw *mw, struct rxe_mr *mr, int access)    (in rxe_do_bind_mw())
        [all …]

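The bind checks above reduce to a containment test: the window must be no longer than the MR and must sit entirely inside the MR's VA span. A freestanding sketch, assuming a VA-based (not zero-based) MR:

    #include <stdint.h>

    /* The window [addr, addr + len] must lie inside [iova, iova + length];
     * mirrors the length check at line 117 and the bounds check at 123-125. */
    static int window_in_mr(uint64_t mr_iova, uint64_t mr_len,
                            uint64_t addr, uint64_t len)
    {
            if (len > mr_len)
                    return 0;
            if (addr < mr_iova || addr + len > mr_iova + mr_len)
                    return 0;
            return 1;
    }
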
/linux/net/sunrpc/xprtrdma/
frwr_ops.c (matches in frwr_cid_init(), frwr_mr_unmap() and frwr_mr_release())
    49          struct rpcrdma_mr *mr)
    51          struct rpc_rdma_cid *cid = &mr->mr_cid;
    54          cid->ci_completion_id = mr->mr_ibmr->res.id;
    57  static void frwr_mr_unmap(struct rpcrdma_mr *mr)
    59          if (mr->mr_device) {
    60                  trace_xprtrdma_mr_unmap(mr);
    61                  ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
    62                                  mr->mr_dir);
    63                  mr->mr_device = NULL;
    72  void frwr_mr_release(struct rpcrdma_mr *mr)
        [all …]

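frwr_mr_unmap() uses a common teardown idiom: test a "mapping is live" pointer, unmap, then clear the pointer so a repeat call is a harmless no-op. The same shape as a standalone sketch (invented types; the real code calls ib_dma_unmap_sg()):

    #include <stdio.h>

    struct mapped_buf {
            void *device;   /* non-NULL only while the DMA mapping is live */
            int   nents;
    };

    /* Safe to call any number of times; only the first call does work. */
    static void buf_unmap(struct mapped_buf *b)
    {
            if (b->device) {
                    printf("unmapping %d entries\n", b->nents); /* stands in for ib_dma_unmap_sg() */
                    b->device = NULL;   /* mark unmapped for later calls */
            }
    }
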
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
gddr5.c (all matches in nvkm_gddr5_calc())
    75          ram->mr[0] &= ~0xf7f;
    76          ram->mr[0] |= (WR & 0x0f) << 8;
    77          ram->mr[0] |= (CL & 0x0f) << 3;
    78          ram->mr[0] |= (WL & 0x07) << 0;
    80          ram->mr[1] &= ~0x0bf;
    81          ram->mr[1] |= (xd & 0x01) << 7;
    82          ram->mr[1] |= (at[0] & 0x03) << 4;
    83          ram->mr[1] |= (dt & 0x03) << 2;
    84          ram->mr[1] |= (ds & 0x03) << 0;
    89          ram->mr1_nuts = ram->mr[1];
        [all …]

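Every ram->mr[n] update above is the same clear-then-OR pattern on a packed register field. Generalized below; the masks and shifts are placeholders, the real layouts come from the GDDR5 mode-register definition:

    #include <stdint.h>

    /* Read-modify-write of one field in a packed mode register. */
    static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
                              uint32_t val)
    {
            reg &= ~(mask << shift);        /* clear the field's bits */
            reg |= (val & mask) << shift;   /* insert the new value */
            return reg;
    }

    /* set_field(mr0, 0x0f, 8, WR) corresponds to the pair:
     * ram->mr[0] &= ~(0x0f << 8); ram->mr[0] |= (WR & 0x0f) << 8; */
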
sddr3.c (all matches in nvkm_sddr3_calc())
    92          ODT = (ram->mr[1] & 0x004) >> 2 |
    93                (ram->mr[1] & 0x040) >> 5 |
    94                (ram->mr[1] & 0x200) >> 7;
   106          ram->mr[0] &= ~0xf74;
   107          ram->mr[0] |= (WR & 0x07) << 9;
   108          ram->mr[0] |= (CL & 0x0e) << 3;
   109          ram->mr[0] |= (CL & 0x01) << 2;
   111          ram->mr[1] &= ~0x245;
   112          ram->mr[1] |= (ODT & 0x1) << 2;
   113          ram->mr[1] |= (ODT & 0x2) << 5;
        [all …]

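The ODT computation at lines 92-94 goes the other way: it gathers three scattered MR1 bits (2, 6 and 9) into one contiguous 3-bit value. The same shifts as a standalone helper:

    #include <stdint.h>

    /* Collect MR1 bits 2, 6 and 9 into a 3-bit ODT value. */
    static unsigned int gather_odt(uint32_t mr1)
    {
            return ((mr1 & 0x004) >> 2) |   /* bit 2 -> bit 0 */
                   ((mr1 & 0x040) >> 5) |   /* bit 6 -> bit 1 */
                   ((mr1 & 0x200) >> 7);    /* bit 9 -> bit 2 */
    }
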
gddr3.c (all matches in nvkm_gddr3_calc())
    89          DLL = !(ram->mr[1] & 0x1);
    90          RON = !((ram->mr[1] & 0x300) >> 8);
    98          ODT = (ram->mr[1] & 0xc) >> 2;
   101          hi = ram->mr[2] & 0x1;
   107          ram->mr[0] &= ~0xf74;
   108          ram->mr[0] |= (CWL & 0x07) << 9;
   109          ram->mr[0] |= (CL & 0x07) << 4;
   110          ram->mr[0] |= (CL & 0x08) >> 1;
   112          ram->mr[1] &= ~0x3fc;
   113          ram->mr[1] |= (ODT & 0x03) << 2;
        [all …]

sddr2.c (all matches in nvkm_sddr2_calc())
    82          ODT = (ram->mr[1] & 0x004) >> 2 |
    83                (ram->mr[1] & 0x040) >> 5;
    91          ram->mr[0] &= ~0xf70;
    92          ram->mr[0] |= (WR & 0x07) << 9;
    93          ram->mr[0] |= (CL & 0x07) << 4;
    95          ram->mr[1] &= ~0x045;
    96          ram->mr[1] |= (ODT & 0x1) << 2;
    97          ram->mr[1] |= (ODT & 0x2) << 5;
    98          ram->mr[1] |= !DLL;

/linux/include/trace/events/
tsm_mr.h
    13          TP_PROTO(const struct tsm_measurement_register *mr),
    15          TP_ARGS(mr),
    18                  __string(mr, mr->mr_name)
    19                  __string(hash, mr->mr_flags & TSM_MR_F_NOHASH ?
    20                          "data" : hash_algo_name[mr->mr_hash])
    21                  __dynamic_array(u8, d, mr->mr_size)
    25                  __assign_str(mr);
    27                  memcpy(__get_dynamic_array(d), mr->mr_value, __get_dynamic_array_len(d));
    30          TP_printk("[%s] %s:%s", __get_str(mr), __get_str(hash),
    36          TP_PROTO(const struct tsm_measurement_register *mr, int rc),
        [all …]

/linux/io_uring/
memmap.c (matches in io_free_region() and the other io_region_* helpers)
    91  void io_free_region(struct user_struct *user, struct io_mapped_region *mr)
    93          if (mr->pages) {
    94                  long nr_refs = mr->nr_pages;
    96                  if (mr->flags & IO_REGION_F_SINGLE_REF)
    99                  if (mr->flags & IO_REGION_F_USER_PROVIDED)
   100                          unpin_user_pages(mr->pages, nr_refs);
   102                          release_pages(mr->pages, nr_refs);
   104          kvfree(mr->pages);
   106          if ((mr->flags & IO_REGION_F_VMAP) && mr
   114  io_region_init_ptr(struct io_mapped_region *mr)
   134  io_region_pin_pages(struct io_mapped_region *mr, struct io_uring_region_desc *reg)
   152  io_region_allocate_pages(struct io_mapped_region *mr, struct io_uring_region_desc *reg, unsigned long mmap_offset)
   184  io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr, struct io_uring_region_desc *reg, unsigned long mmap_offset)
   259  io_region_validate_mmap(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
   286  io_region_mmap(struct io_ring_ctx *ctx, struct io_mapped_region *mr, struct vm_area_struct *vma, unsigned max_pages)
        [all …]

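io_free_region() picks a release path per flag: user-provided pages are unpinned, kernel-allocated pages are released, and a single-ref region drops exactly one reference however many pages it spans. A self-contained sketch of that branching; the flag names and stub release functions are invented to mirror the listing:

    #include <stdio.h>
    #include <stdlib.h>

    enum { REGION_USER_PROVIDED = 1, REGION_SINGLE_REF = 2 };

    struct region {
            void **pages;
            long   nr_pages;
            int    flags;
    };

    static void unpin_pages(void **p, long n) { (void)p; printf("unpin %ld\n", n); }
    static void put_pages(void **p, long n)   { (void)p; printf("put %ld\n", n); }

    static void region_free(struct region *r)
    {
            if (r->pages) {
                    long nr_refs = (r->flags & REGION_SINGLE_REF) ? 1 : r->nr_pages;

                    if (r->flags & REGION_USER_PROVIDED)
                            unpin_pages(r->pages, nr_refs);
                    else
                            put_pages(r->pages, nr_refs);

                    free(r->pages);
                    r->pages = NULL;
            }
    }
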
/linux/arch/powerpc/platforms/pseries/
hvCall.S (mr here is the PowerPC "move register" extended mnemonic, an alias for or rD,rS,rS, not a memory-region symbol)
    64          mr r4,r3; \
    65          mr r3,r0; \
    77          mr r5,BUFREG; \
   160          mr r4,r5
   161          mr r5,r6
   162          mr r6,r7
   163          mr r7,r8
   164          mr r8,r9
   165          mr r9,r10
   187          mr r4,r5
        [all …]

/linux/net/rds/
rdma.c (matches in rds_mr_tree_walk() and rds_destroy_mr())
    70          struct rds_mr *mr;
    74                  mr = rb_entry(parent, struct rds_mr, r_rb_node);
    76                  if (key < mr->r_key)
    78                  else if (key > mr->r_key)
    81                          return mr;
    95  static void rds_destroy_mr(struct rds_mr *mr)
    97          struct rds_sock *rs = mr->r_sock;
   102                    mr->r_key, kref_read(&mr->r_kref));
   105          if (!RB_EMPTY_NODE(&mr->r_rb_node))
   106                  rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
        [all …]

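rds_mr_tree_walk() is a plain keyed descent: left on smaller keys, right on larger, stop on a match. The same walk over a generic binary search tree, minus the rbtree plumbing:

    #include <stddef.h>
    #include <stdint.h>

    struct node {
            uint32_t     key;
            struct node *left, *right;
    };

    static struct node *tree_walk(struct node *root, uint32_t key)
    {
            while (root) {
                    if (key < root->key)
                            root = root->left;
                    else if (key > root->key)
                            root = root->right;
                    else
                            return root;    /* found */
            }
            return NULL;                    /* not present */
    }
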
/linux/drivers/infiniband/sw/rdmavt/
trace_mr.h
    21          TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
    22          TP_ARGS(mr, m, n, v, len),
    24                  RDI_DEV_ENTRY(ib_to_rvt(mr->pd->device))
    37                  RDI_DEV_ASSIGN(ib_to_rvt(mr->pd->device));
    40                  __entry->iova = mr->iova;
    41                  __entry->user_base = mr->user_base;
    42                  __entry->lkey = mr->lkey;
    46                  __entry->length = mr->length;
    47                  __entry->offset = mr->offset;
    67          TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
        [all …]

/linux/drivers/infiniband/core/
mr_pool.c (matches in ib_mr_pool_get(), ib_mr_pool_put() and ib_mr_pool_init())
    10          struct ib_mr *mr;
    14          mr = list_first_entry_or_null(list, struct ib_mr, qp_entry);
    15          if (mr) {
    16                  list_del(&mr->qp_entry);
    21          return mr;
    25  void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr)
    30          list_add(&mr->qp_entry, list);
    39          struct ib_mr *mr;
    45                  mr = ib_alloc_mr_integrity(qp->pd, max_num_sg,
    48                  mr = ib_alloc_mr(qp->pd, type, max_num_sg);
        [all …]

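The MR pool above is a locked free list: get detaches the first entry or returns NULL, put pushes one back. A minimal single-threaded sketch; the kernel version holds the QP's spinlock around both operations:

    #include <stddef.h>

    struct entry {
            struct entry *next;
    };

    static struct entry *pool_get(struct entry **head)
    {
            struct entry *e = *head;

            if (e)
                    *head = e->next;    /* detach, like list_del() */
            return e;                   /* NULL when the pool is empty */
    }

    static void pool_put(struct entry **head, struct entry *e)
    {
            e->next = *head;            /* push back, like list_add() */
            *head = e;
    }
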
/linux/drivers/rtc/
rtc-at91sam9.c (matches in at91_rtc_settime(), at91_rtc_setalarm() and at91_rtc_alarm_irq_enable())
   133          u32 offset, alarm, mr;
   140          mr = rtt_readl(rtc, MR);
   143          rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
   164          mr &= ~AT91_RTT_ALMIEN;
   170          rtt_writel(rtc, MR, mr | AT91_RTT_RTTRST);
   205          u32 mr;
   214          mr = rtt_readl(rtc, MR);
   215          rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
   226          rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
   236          u32 mr = rtt_readl(rtc, MR);
        [all …]

/linux/drivers/infiniband/hw/mlx5/
odp.c (matches in populate_mtt(), mlx5_odp_populate_xlt() and free_implicit_child_mr_work())
   159          struct mlx5_ib_mr *mr, int flags)
   161          struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
   195          struct mlx5_ib_mr *mr, int flags)
   198                  populate_ksm(xlt, idx, nentries, mr, flags);
   201          return populate_mtt(xlt, idx, nentries, mr, flags);
   213          struct mlx5_ib_mr *mr =
   215          struct mlx5_ib_mr *imr = mr->parent;
   217          struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
   219          mlx5r_deref_wait_odp_mkey(&mr->mmkey);
   222          mlx5r_umr_update_xlt(mr->parent,
        [all …]

/linux/drivers/sh/intc/
handle.c (all matches in _intc_mask_data())
    44          struct intc_mask_reg *mr = desc->hw.mask_regs;
    48          while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
    49                  mr = desc->hw.mask_regs + *reg_idx;
    51                  for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
    52                          if (mr->enum_ids[*fld_idx] != enum_id)
    55                          if (mr->set_reg && mr->clr_reg) {
    58                                  reg_e = mr->clr_reg;
    59                                  reg_d = mr->set_reg;
    62                          if (mr->set_reg) {
    64                                  reg_e = mr->set_reg;
        [all …]

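_intc_mask_data() keeps its cursor (*reg_idx, *fld_idx) in the caller, so the scan can resume just past a previous hit. The control flow reduced to a sketch with invented types; the caller bumps *fld_idx before resuming to move on to the next match:

    #define IDS_PER_REG 4

    struct mask_reg {
            unsigned int ids[IDS_PER_REG];
    };

    /* Two-level resumable search; on a hit, the cursors point at the match. */
    static int find_id(const struct mask_reg *regs, unsigned int nr_regs,
                       unsigned int id, unsigned int *reg_idx,
                       unsigned int *fld_idx)
    {
            for (; *reg_idx < nr_regs; (*reg_idx)++, *fld_idx = 0)
                    for (; *fld_idx < IDS_PER_REG; (*fld_idx)++)
                            if (regs[*reg_idx].ids[*fld_idx] == id)
                                    return 0;
            return -1;      /* exhausted */
    }
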
balancing.c (all matches in intc_dist_data())
    44          struct intc_mask_reg *mr = desc->hw.mask_regs;
    48          for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
    49                  mr = desc->hw.mask_regs + i;
    55                  if (!mr->dist_reg)
    58                  for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
    59                          if (mr->enum_ids[j] != enum_id)
    64                          reg_e = mr->dist_reg;
    65                          reg_d = mr->dist_reg;
    67                          fn += (mr->reg_width >> 3) - 1;
    72                                  (mr->reg_width - 1) - j);

/linux/drivers/infiniband/ulp/iser/
iser_memory.c (matches in iser_inv_rkey() and iser_reg_sig_mr())
   236  static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
   241          inv_wr->ex.invalidate_rkey = mr->rkey;
   255          struct ib_mr *mr = rsc->sig_mr;
   256          struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
   268                  iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
   270          ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
   272          ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
   286          wr->mr = mr;
   287          wr->key = mr->rkey;
   293          sig_reg->sge.lkey = mr->lkey;
        [all …]

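Line 270 refreshes the MR's key before reusing it. ib_inc_rkey() advances only the 8-bit consumer-owned byte, leaving the HCA-owned index bits untouched; to my reading of the in-tree helper, it amounts to:

    #include <stdint.h>

    /* Bump only the low byte of an RDMA key; the upper bits (the HCA's MR
     * index) stay fixed, so the key still names the same MR. */
    static uint32_t inc_rkey(uint32_t rkey)
    {
            const uint32_t mask = 0x000000ff;

            return ((rkey + 1) & mask) | (rkey & ~mask);
    }

    /* e.g. 0x1234abff -> 0x1234ab00: the byte wraps, the index is untouched */
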
/linux/arch/x86/mm/
init.c (matches in save_mr() and adjust_range_page_size_mask())
   327  static int __meminit save_mr(struct map_range *mr, int nr_range,
   334          mr[nr_range].start = start_pfn << PAGE_SHIFT;
   335          mr[nr_range].end = end_pfn << PAGE_SHIFT;
   336          mr[nr_range].page_size_mask = page_size_mask;
   347  static void __ref adjust_range_page_size_mask(struct map_range *mr,
   354                      !(mr[i].page_size_mask & (1 << PG_LEVEL_2M))) {
   355                          unsigned long start = round_down(mr[i].start, PMD_SIZE);
   356                          unsigned long end = round_up(mr[i].end, PMD_SIZE);
   364                          mr[i].page_size_mask |= 1 << PG_LEVEL_2M;
   367                      !(mr[i].page_size_mask & (1 << PG_LEVEL_1G))) {
        [all …]

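adjust_range_page_size_mask() widens a range outward to 2 MiB boundaries before granting PG_LEVEL_2M. The rounding itself as standalone helpers (PMD_SIZE is 2 MiB on x86-64 with 4 KiB base pages):

    /* Outward alignment to a power-of-two boundary, the round_down/round_up
     * pair used at lines 355-356 above. */
    #define SZ_2M   (2UL << 20)

    static unsigned long round_down_2m(unsigned long x)
    {
            return x & ~(SZ_2M - 1);
    }

    static unsigned long round_up_2m(unsigned long x)
    {
            return (x + SZ_2M - 1) & ~(SZ_2M - 1);
    }

    /* [0x201000, 0x3ff000) widens to [0x200000, 0x400000) */
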
/linux/drivers/watchdog/
sama5d4_wdt.c (matches in sama5d4_wdt_start(), sama5d4_wdt_stop() and sama5d4_wdt_set_timeout(); the driver updates a cached copy of MR in wdt->mr, then writes it back)
    31          u32 mr;    (struct member)
    51  #define wdt_enabled (!(wdt->mr & AT91_WDT_WDDIS))
    86          wdt->mr &= ~AT91_SAM9X60_WDDIS;
    88          wdt->mr &= ~AT91_WDT_WDDIS;
    90          wdt_write(wdt, AT91_WDT_MR, wdt->mr);
   101          wdt->mr |= AT91_SAM9X60_WDDIS;
   103          wdt->mr |= AT91_WDT_WDDIS;
   105          wdt_write(wdt, AT91_WDT_MR, wdt->mr);
   133          wdt->mr &= ~AT91_WDT_WDV;
   134          wdt->mr |= AT91_WDT_SET_WDV(value);
        [all …]

/linux/arch/powerpc/kernel/
head_64.S (as above, mr is the move-register mnemonic; the pattern saves the boot arguments from r3/r4 into r24/r25 and restores them around calls)
   159          mr r24,r3
   161          mr r25,r4
   179          mr r3,r24
   185          mr r4,r25
   307          mr r24,r3
   313          mr r3,r24
   338          mr r24,r3
   339          mr r25,r4
   346          mr r3,r24
   347          mr r4,r25
        [all …]

/linux/include/rdma/
rdmavt_mr.h (matches in rvt_put_mr(), rvt_get_mr(), rvt_put_ss() and rvt_update_sge())
    68          struct rvt_mregion *mr;    (struct member)
    83  static inline void rvt_put_mr(struct rvt_mregion *mr)
    85          percpu_ref_put(&mr->refcount);
    88  static inline void rvt_get_mr(struct rvt_mregion *mr)
    90          percpu_ref_get(&mr->refcount);
    96          rvt_put_mr(ss->sge.mr);
   124                  rvt_put_mr(sge->mr);
   127          } else if (sge->length == 0 && sge->mr->lkey) {
   129                  if (++sge->m >= sge->mr->mapsz)
   133                  sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
        [all …]

/linux/fs/smb/client/
smbdirect.c (matches in register_mr_done(), smbd_mr_recovery_work() and smbd_mr_disable_locked())
  2307          struct smbdirect_mr_io *mr =
  2309          struct smbdirect_socket *sc = mr->socket;
  2338                  rc = ib_dereg_mr(smbdirect_mr->mr);
  2347                  smbdirect_mr->mr = ib_alloc_mr(
  2350                  if (IS_ERR(smbdirect_mr->mr)) {
  2375  static void smbd_mr_disable_locked(struct smbdirect_mr_io *mr)
  2377          struct smbdirect_socket *sc = mr->socket;
  2379          lockdep_assert_held(&mr->mutex);
  2381          if (mr->state == SMBDIRECT_MR_DISABLED)
  2384          if (mr->mr)
        [all …]

/linux/net/xdp/
xdp_umem.c (matches in xdp_umem_reg() and xdp_umem_create())
   157  static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
   159          bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
   160          u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
   161          u64 addr = mr->addr, size = mr->len;
   176          if (mr->flags & ~XDP_UMEM_FLAGS_VALID)
   208          if (mr->flags & XDP_UMEM_TX_METADATA_LEN) {
   209                  if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)
   211                  umem->tx_metadata_len = mr->tx_metadata_len;
   221          umem->flags = mr->flags;
   247  struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
        [all …]

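The tx_metadata_len check at line 209 enforces two properties: the length is below 256 and is a multiple of 8. As a tiny predicate mirroring exactly the test shown:

    #include <stdint.h>

    /* Accept only metadata lengths that satisfy the XDP_UMEM_TX_METADATA_LEN
     * rules from xdp_umem_reg(): < 256 and 8-byte aligned. */
    static int tx_metadata_len_ok(uint32_t len)
    {
            return len < 256 && (len % 8) == 0;
    }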