/* SPDX-License-Identifier: GPL-2.0
 *
 *	Network memory
 *
 *	Author:	Mina Almasry <almasrymina@google.com>
 */

#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H

#include <linux/mm.h>
#include <net/net_debug.h>

/* net_iov */

DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);

/* We overload the LSB of the struct page pointer to indicate whether it's
 * a page or net_iov.
 */
#define NET_IOV 0x01UL

struct net_iov {
	unsigned long __unused_padding;
	unsigned long pp_magic;
	struct page_pool *pp;
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	atomic_long_t pp_ref_count;
};

/* These fields in struct page are used by the page_pool and net stack:
 *
 *	struct {
 *		unsigned long pp_magic;
 *		struct page_pool *pp;
 *		unsigned long _pp_mapping_pad;
 *		unsigned long dma_addr;
 *		atomic_long_t pp_ref_count;
 *	};
 *
 * We mirror the page_pool fields here so the page_pool can access these fields
 * without worrying whether the underlying fields belong to a page or net_iov.
 *
 * The non-net stack fields of struct page are private to the mm stack and must
 * never be mirrored to net_iov.
 */
#define NET_IOV_ASSERT_OFFSET(pg, iov)             \
	static_assert(offsetof(struct page, pg) == \
		      offsetof(struct net_iov, iov))
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET

/* netmem */

/**
 * typedef netmem_ref - a nonexistent type marking a reference to generic
 * network memory.
 *
 * A netmem_ref is currently either a reference to a struct page or a struct
 * net_iov, with the NET_IOV bit in the LSB distinguishing the two. This
 * abstraction is introduced so support for new memory types can be added.
 *
 * Use the supplied helpers to obtain the underlying memory pointer and fields.
 */
typedef unsigned long __bitwise netmem_ref;

static inline bool netmem_is_net_iov(const netmem_ref netmem)
{
	return (__force unsigned long)netmem & NET_IOV;
}

/* This conversion fails (returns NULL) if the netmem_ref is not struct page
 * backed.
 */
static inline struct page *netmem_to_page(netmem_ref netmem)
{
	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
		return NULL;

	return (__force struct page *)netmem;
}

/* This conversion fails (returns NULL) if the netmem_ref is not net_iov
 * backed.
 */
static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return (struct net_iov *)((__force unsigned long)netmem &
					  ~NET_IOV);

	DEBUG_NET_WARN_ON_ONCE(true);
	return NULL;
}

static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
{
	return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}

static inline netmem_ref page_to_netmem(struct page *page)
{
	return (__force netmem_ref)page;
}

static inline int netmem_ref_count(netmem_ref netmem)
{
	/* The non-pp refcount of net_iov is always 1. On net_iov, we only
	 * support pp refcounting which uses the pp_ref_count field.
	 */
	if (netmem_is_net_iov(netmem))
		return 1;

	return page_ref_count(netmem_to_page(netmem));
}

static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return 0;

	return page_to_pfn(netmem_to_page(netmem));
}

/* The page_pool fields of struct page and struct net_iov sit at the same
 * offsets (see the static_asserts above), so clearing the LSB yields a
 * pointer through which those shared fields can be read regardless of the
 * underlying backing type.
 */
static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
{
	return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
}

static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
	return __netmem_clear_lsb(netmem)->pp;
}

static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
	return &__netmem_clear_lsb(netmem)->pp_ref_count;
}

static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
{
	/* NUMA node preference only makes sense if we're allocating
	 * system memory. Memory providers (which give us net_iovs)
	 * choose for us.
	 */
	if (netmem_is_net_iov(netmem))
		return true;

	return page_to_nid(netmem_to_page(netmem)) == pref_nid;
}

static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
	/* niov are never compounded */
	if (netmem_is_net_iov(netmem))
		return netmem;

	return page_to_netmem(compound_head(netmem_to_page(netmem)));
}

static inline void *netmem_address(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return NULL;

	return page_address(netmem_to_page(netmem));
}

static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
	return __netmem_clear_lsb(netmem)->dma_addr;
}

#endif /* _NET_NETMEM_H */
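
/* Illustrative usage sketch, not part of the header above: it shows how the
 * LSB tagging round-trips through the conversion helpers for both backing
 * types. The names netmem_example_roundtrip, page and niov are hypothetical
 * and exist only for this example; such code would live in a .c file that
 * includes <net/netmem.h>.
 *
 *	static void netmem_example_roundtrip(struct page *page,
 *					     struct net_iov *niov)
 *	{
 *		netmem_ref pmem = page_to_netmem(page);
 *		netmem_ref imem = net_iov_to_netmem(niov);
 *
 *		page-backed: the LSB stays clear, so netmem_to_page()
 *		succeeds and returns the original pointer:
 *
 *		WARN_ON(netmem_is_net_iov(pmem));
 *		WARN_ON(netmem_to_page(pmem) != page);
 *
 *		net_iov-backed: the LSB is set, and the mirrored page_pool
 *		fields remain reachable through the generic accessors:
 *
 *		WARN_ON(!netmem_is_net_iov(imem));
 *		WARN_ON(netmem_to_net_iov(imem) != niov);
 *		WARN_ON(netmem_get_pp(imem) != niov->pp);
 *	}
 */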