core.c (cc9b94029e9ef51787af908e9856b1eed314bc00 -> 7bd509e311f408f7a5132fcdde2069af65fa05ae)
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *

--- 122 unchanged lines hidden ---

}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

#define SHA_BPF_RAW_SIZE \
	round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)

/* Called under verifier mutex. */
void bpf_prog_calc_digest(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	static u32 ws[SHA_WORKSPACE_WORDS];
	static u8 raw[SHA_BPF_RAW_SIZE];
	struct bpf_insn *dst = (void *)raw;
	u32 i, bsize, psize, blocks;
	bool was_ld_map;
	u8 *todo = raw;
	__be32 *result;
	__be64 *bits;

	sha_init(fp->digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
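	/* A map load is a BPF_LD_IMM64, i.e. two consecutive instructions:
	 * the first carries BPF_PSEUDO_MAP_FD in src_reg and the fd in imm,
	 * the second has code/regs/off zeroed and only supplies the upper
	 * 32 imm bits. Both imm fields are cleared below so the digest does
	 * not depend on the fd value handed in from user space.
	 */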
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = fp->len * sizeof(struct bpf_insn);
	memset(&raw[psize], 0, sizeof(raw) - psize);
	raw[psize++] = 0x80;

	bsize = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(fp->digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)fp->digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(fp->digest[i]);
}

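/* The padding arithmetic above follows the standard SHA-1 message layout:
 * the instruction image is terminated with a 0x80 byte, zero-filled up to
 * a 64-byte block boundary, and the bit length is stored big-endian in the
 * last 8 bytes of the final block, with one extra block when the length
 * field does not fit. The standalone sketch below is illustrative only,
 * not part of core.c, and reproduces just the block-count calculation.
 */
#include <stdint.h>
#include <stdio.h>

#define MSG_BYTES 64	/* same role as SHA_MESSAGE_BYTES */

/* psize counts the instruction bytes plus the trailing 0x80 byte,
 * mirroring "raw[psize++] = 0x80" above.
 */
static uint32_t sha1_block_count(uint32_t psize)
{
	uint32_t bsize = (psize + MSG_BYTES - 1) / MSG_BYTES * MSG_BYTES;
	uint32_t blocks = bsize / MSG_BYTES;

	/* No room left for the 8-byte length field: spill into one more block. */
	if (bsize - psize < sizeof(uint64_t))
		blocks++;
	return blocks;
}

int main(void)
{
	printf("%u\n", sha1_block_count(2 * 8 + 1));	/* 2 insns -> 1 block  */
	printf("%u\n", sha1_block_count(8 * 8 + 1));	/* 8 insns -> 2 blocks */
	return 0;
}
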
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;

--- 891 unchanged lines hidden ---
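
/* Call and Exit are the two BPF_JMP opcodes that carry no relative target,
 * which is why the predicate above excludes them. The helper below is a
 * hypothetical illustration (not taken from core.c) showing the predicate
 * applied across a program; the kernel relies on the same kind of filter
 * when it has to fix up branch offsets after patching the instruction image.
 */
static u32 count_targeted_jumps(const struct bpf_prog *prog)
{
	u32 i, n = 0;

	for (i = 0; i < prog->len; i++)
		if (bpf_is_jmp_and_has_target(&prog->insnsi[i]))
			n++;
	return n;
}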

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{

--- 39 unchanged lines hidden ---
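
/* The __weak annotations above give every helper proto a zero-filled
 * fallback definition, so core.c links even when the bpf syscall (and the
 * strong definitions that come with it) is configured out. The standalone
 * sketch below is illustrative only, not kernel code and with made-up
 * names, and shows the same linker behaviour via GCC/Clang's weak attribute.
 */
#include <stdio.h>

struct func_proto {
	const char *name;
};

/* Weak default: all-zero object, silently replaced by a strong definition
 * from another translation unit if one is linked in.
 */
__attribute__((weak)) const struct func_proto map_lookup_proto;

int main(void)
{
	printf("%s\n", map_lookup_proto.name ? map_lookup_proto.name
					     : "weak default in effect");
	return 0;
}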