/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>

static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
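
/*
 * Usage sketch (illustrative only, not part of this file): user space
 * reaches bpf_test_run() through the BPF_PROG_TEST_RUN command of the
 * bpf(2) syscall. The wrapper below is a minimal sketch under that
 * assumption; the helper name bpf_prog_test_run_fd() is hypothetical.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static int bpf_prog_test_run_fd(int prog_fd, const void *data_in,
 *					__u32 size_in, __u32 repeat,
 *					__u32 *retval, __u32 *duration)
 *	{
 *		union bpf_attr attr;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.test.prog_fd = prog_fd;
 *		attr.test.data_in = (__u64)(unsigned long)data_in;
 *		attr.test.data_size_in = size_in;
 *		attr.test.repeat = repeat;	// 0 is treated as 1 above
 *
 *		err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *		if (err)
 *			return err;
 *		*retval = attr.test.retval;	// program return code
 *		*duration = attr.test.duration;	// mean ns per run, capped at U32_MAX
 *		return 0;
 *	}
 */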

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}
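
/*
 * Example (a sketch, reusing the hypothetical bpf_prog_test_run_fd()
 * wrapper above and assuming prog_fd is a loaded BPF_PROG_TYPE_SCHED_CLS
 * program): the input must be at least ETH_HLEN bytes, since
 * bpf_test_init() rejects anything shorter, and it is parsed as a
 * complete Ethernet frame because eth_type_trans() consumes the MAC
 * header.
 *
 *	__u32 retval, duration;
 *	unsigned char pkt[64] = { 0 };	// dst/src MACs left zeroed
 *
 *	pkt[12] = 0x08;			// EtherType 0x0800 (IPv4)
 *	pkt[13] = 0x00;
 *	if (!bpf_prog_test_run_fd(prog_fd, pkt, sizeof(pkt), 1,
 *				  &retval, &duration))
 *		printf("tc prog returned %u after %u ns\n", retval, duration);
 */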

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}
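
/*
 * Example (same hypothetical wrapper, prog_fd now an XDP program): the
 * test buffer is laid out with XDP_PACKET_HEADROOM + NET_IP_ALIGN of
 * headroom, so bpf_xdp_adjust_head() and bpf_xdp_adjust_meta() have
 * room to work; if the program moves data or data_end, the size
 * reported back in attr.test.data_size_out reflects the adjusted
 * packet, per the recomputation above.
 *
 *	__u32 retval, duration;
 *	unsigned char frame[128] = { 0 };	// >= ETH_HLEN required
 *
 *	if (!bpf_prog_test_run_fd(prog_fd, frame, sizeof(frame), 1000,
 *				  &retval, &duration))
 *		printf("xdp verdict %u, avg %u ns over 1000 runs\n",
 *		       retval, duration);	// e.g. XDP_PASS == 2
 */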