/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided below.
 * You have the option to license this software under the complete terms of
 * either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* The foreach-multiple-entries macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the translation
	 * will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right.  For bit X, we need right rotate X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode two immeds in a single instr with our
	 * normal swreg infra, so if the param is an immed, we encode it as
	 * reg_none() and copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}

static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If the @imm is small enough encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If the @imm is small enough encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are left unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}

/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
			      &off);

	/* Set up PREV_ALU fields to override the memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use a single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use a single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use a single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use a single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes, then
		 * another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte aligned
		 * part of the length, then another direct_ref write8 to write
		 * the remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value it would have
	 * had before this transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

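/* Unlike data_ld() above, the host-order variants below use READ32_SWAP so
 * the result ends up in host byte order, and they take the address as an
 * lreg/rreg pair so the same helper serves both 32-bit and 40-bit addressing
 * (see the _addr32()/_addr40() wrappers and addr40_offset()).
 */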
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

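/* Stack (LMEM) accesses are broken up by mem_op_stack() into slices of at
 * most 4 bytes; an lmem_step callback emits the code for one slice.
 * @first/@new_gpr/@last describe where in the access the slice falls, @lm3
 * selects LM index 3 instead of index 0, and @needs_inc asks for a
 * post-increment of the LM pointer after the slice.
 */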
typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already looked at that LMEM word
		 * and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations are going to need
		 * RMW, the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach the bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because the offset is
		 * ORed in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

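/* Test-style conditional jumps: run the ALU op for its condition codes only
 * (destination is reg_none()) and branch on @br_mask.  The 64-bit test is
 * done as two 32-bit test-and-branch pairs, one per register half.
 */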
static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4]	= { BR_BLO, true },
	[BPF_JGE >> 4]	= { BR_BHS, false },
	[BPF_JLT >> 4]	= { BR_BLO, false },
	[BPF_JLE >> 4]	= { BR_BHS, true },
	[BPF_JSGT >> 4]	= { BR_BLT, true },
	[BPF_JSGE >> 4]	= { BR_BGE, false },
	[BPF_JSLT >> 4]	= { BR_BLT, false },
	[BPF_JSLE >> 4]	= { BR_BGE, true },
};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in the jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}

static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), carry_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, carry_op, reg_a(reg + 1));

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* CSR value is read in following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	return 0;
}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 jmp_tgt;

	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;

	/* Make sure the queue id fits into the FW field */
	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
		 ALU_OP_AND_NOT_B, reg_imm(0xff));
	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);

	/* Set the 'queue selected' bit and the queue value */
	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
		      SHF_SC_NONE, 0);
	/* Delay slots end here; we will jump over the next instruction if the
	 * queue value fits into the field.
	 */
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
		      SHF_SC_NONE, 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
		return -EINVAL;

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}

static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));

	return 0;
}

/* Pseudo code:
 *   if shift_amt >= 32
 *     dst_high = dst_low << shift_amt[4:0]
 *     dst_low = 0
 *   else
 *     dst_high = (dst_high, dst_low) >> (32 - shift_amt)
 *     dst_low = dst_low << shift_amt
 *
 * The indirect shift will use the same logic at runtime.
 */
static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
	if (shift_amt < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1),
			 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF,
			 32 - shift_amt);
		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_L_SHF, shift_amt);
	} else if (shift_amt == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (shift_amt > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	return __shl_imm64(nfp_prog, dst, insn->imm);
}

static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB,
		 reg_b(src));
	emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_R_DSHF);
}

/* NOTE: for indirect left shift, HIGH part should be calculated first. */
static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_L_SHF);
}

static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	shl_reg64_lt32_high(nfp_prog, dst, src);
	shl_reg64_lt32_low(nfp_prog, dst, src);
}

static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_L_SHF);
	wrp_immed(nfp_prog, reg_both(dst), 0);
}

static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 umin, umax;
	u8 dst, src;

	dst = insn->dst_reg * 2;
	umin = meta->umin;
	umax = meta->umax;
	if (umin == umax)
		return __shl_imm64(nfp_prog, dst, umin);

	src = insn->src_reg * 2;
	if (umax < 32) {
		shl_reg64_lt32(nfp_prog, dst, src);
	} else if (umin >= 32) {
		shl_reg64_ge32(nfp_prog, dst, src);
	} else {
		/* Generate different instruction sequences depending on runtime
		 * value of shift amount.
		 */
		u16 label_ge32, label_end;

		label_ge32 = nfp_prog_current_offset(nfp_prog) + 7;
		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);

		shl_reg64_lt32_high(nfp_prog, dst, src);
		label_end = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br(nfp_prog, BR_UNC, label_end, 2);
		/* shl_reg64_lt32_low packed in delay slot. */
		shl_reg64_lt32_low(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
			return -EINVAL;
		shl_reg64_ge32(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
			return -EINVAL;
	}

	return 0;
}

/* Pseudo code:
 *   if shift_amt >= 32
 *     dst_low = dst_high >> shift_amt[4:0]
 *     dst_high = 0
 *   else
 *     dst_low = (dst_high, dst_low) >> shift_amt
 *     dst_high = dst_high >> shift_amt
 *
 * The indirect shift will use the same logic at runtime.
 */
static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
	if (shift_amt < 32) {
		emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
	} else if (shift_amt == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (shift_amt > 32) {
		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	return __shr_imm64(nfp_prog, dst, insn->imm);
}

/* NOTE: for indirect right shift, LOW part should be calculated first. */
static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
		       reg_b(dst + 1), SHF_SC_R_SHF);
}

static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_R_DSHF);
}

static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	shr_reg64_lt32_low(nfp_prog, dst, src);
	shr_reg64_lt32_high(nfp_prog, dst, src);
}

static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
		       reg_b(dst + 1), SHF_SC_R_SHF);
	wrp_immed(nfp_prog, reg_both(dst + 1), 0);
}

static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 umin, umax;
	u8 dst, src;

	dst = insn->dst_reg * 2;
	umin = meta->umin;
	umax = meta->umax;
	if (umin == umax)
		return __shr_imm64(nfp_prog, dst, umin);

	src = insn->src_reg * 2;
	if (umax < 32) {
		shr_reg64_lt32(nfp_prog, dst, src);
	} else if (umin >= 32) {
		shr_reg64_ge32(nfp_prog, dst, src);
	} else {
		/* Generate different instruction sequences depending on runtime
		 * value of shift amount.
		 */
		u16 label_ge32, label_end;

		label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
		shr_reg64_lt32_low(nfp_prog, dst, src);
		label_end = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br(nfp_prog, BR_UNC, label_end, 2);
		/* shr_reg64_lt32_high packed in delay slot. */
		shr_reg64_lt32_high(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
			return -EINVAL;
		shr_reg64_ge32(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
			return -EINVAL;
	}

	return 0;
}

/* Code logic is the same as __shr_imm64 except ashr requires the signedness
 * bit to be told through the PREV_ALU result.
 */
static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
	if (shift_amt < 32) {
		emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
		/* Set signedness bit. */
		emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
			 reg_imm(0));
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
	} else if (shift_amt == 32) {
		/* NOTE: this also helps setting the signedness bit. */
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
			 reg_b(dst + 1), SHF_SC_R_SHF, 31);
	} else if (shift_amt > 32) {
		emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
			 reg_imm(0));
		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
			 reg_b(dst + 1), SHF_SC_R_SHF, 31);
	}

	return 0;
}

static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	return __ashr_imm64(nfp_prog, dst, insn->imm);
}

static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	/* NOTE: the first insn will set both the indirect shift amount (source
	 * A) and the signedness bit (MSB of the result).
	 */
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
		       reg_b(dst + 1), SHF_SC_R_SHF);
}

static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	/* NOTE: it is the same as a logical shift because we don't need to
	 * shift in the signedness bit when the shift amount is less than 32.
	 */
	return shr_reg64_lt32_low(nfp_prog, dst, src);
}

static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	ashr_reg64_lt32_low(nfp_prog, dst, src);
	ashr_reg64_lt32_high(nfp_prog, dst, src);
}

static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
		       reg_b(dst + 1), SHF_SC_R_SHF);
	emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
		 reg_b(dst + 1), SHF_SC_R_SHF, 31);
}

/* Like ashr_imm64, but need to use indirect shift. */
static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 umin, umax;
	u8 dst, src;

	dst = insn->dst_reg * 2;
	umin = meta->umin;
	umax = meta->umax;
	if (umin == umax)
		return __ashr_imm64(nfp_prog, dst, umin);

	src = insn->src_reg * 2;
	if (umax < 32) {
		ashr_reg64_lt32(nfp_prog, dst, src);
	} else if (umin >= 32) {
		ashr_reg64_ge32(nfp_prog, dst, src);
	} else {
		u16 label_ge32, label_end;

		label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
		ashr_reg64_lt32_low(nfp_prog, dst, src);
		label_end = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br(nfp_prog, BR_UNC, label_end, 2);
		/* ashr_reg64_lt32_high packed in delay slot. */
		ashr_reg64_lt32_high(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
			return -EINVAL;
		ashr_reg64_ge32(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
			return -EINVAL;
	}

	return 0;
}

static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u8 dst = meta->insn.dst_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
2133 SHF_SC_R_ROT, 8); 2134 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2135 SHF_SC_R_SHF, 16); 2136 2137 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2138 break; 2139 case 32: 2140 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2141 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2142 break; 2143 case 64: 2144 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2145 2146 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2147 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2148 break; 2149 } 2150 2151 return 0; 2152 } 2153 2154 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2155 { 2156 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2157 u32 imm_lo, imm_hi; 2158 u8 dst; 2159 2160 dst = prev->insn.dst_reg * 2; 2161 imm_lo = prev->insn.imm; 2162 imm_hi = meta->insn.imm; 2163 2164 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2165 2166 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2167 if (imm_hi == imm_lo) 2168 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2169 else 2170 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2171 2172 return 0; 2173 } 2174 2175 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2176 { 2177 meta->double_cb = imm_ld8_part2; 2178 return 0; 2179 } 2180 2181 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2182 { 2183 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2184 } 2185 2186 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2187 { 2188 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2189 } 2190 2191 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2192 { 2193 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2194 } 2195 2196 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2197 { 2198 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2199 meta->insn.src_reg * 2, 1); 2200 } 2201 2202 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2203 { 2204 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2205 meta->insn.src_reg * 2, 2); 2206 } 2207 2208 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2209 { 2210 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2211 meta->insn.src_reg * 2, 4); 2212 } 2213 2214 static int 2215 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2216 unsigned int size, unsigned int ptr_off) 2217 { 2218 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2219 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2220 true, wrp_lmem_load); 2221 } 2222 2223 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2224 u8 size) 2225 { 2226 swreg dst = reg_both(meta->insn.dst_reg * 2); 2227 2228 switch (meta->insn.off) { 2229 case offsetof(struct __sk_buff, len): 2230 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2231 return -EOPNOTSUPP; 2232 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2233 break; 2234 case offsetof(struct __sk_buff, data): 2235 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2236 return -EOPNOTSUPP; 2237 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2238 break; 2239 case offsetof(struct __sk_buff, data_end): 2240 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2241 return -EOPNOTSUPP; 2242 emit_alu(nfp_prog, dst, 2243 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2244 break; 2245 default: 2246 return -EOPNOTSUPP; 2247 } 2248 2249 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2250 2251 return 0; 2252 } 2253 
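/* Illustrative example (a sketch, not code taken from this driver): the
 * __sk_buff context loads handled above, and the xdp_md equivalents below,
 * would typically come from a guard in the BPF program such as
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + ETH_HLEN > data_end)
 *		return TC_ACT_SHOT;
 *
 * The data access becomes a single move from pptr_reg() and the data_end
 * access becomes plen_reg() + pptr_reg(), because the NFP keeps the packet
 * start pointer and the packet length in separate registers instead of
 * materializing an explicit data_end pointer.
 */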
2254 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2255 u8 size) 2256 { 2257 swreg dst = reg_both(meta->insn.dst_reg * 2); 2258 2259 switch (meta->insn.off) { 2260 case offsetof(struct xdp_md, data): 2261 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2262 return -EOPNOTSUPP; 2263 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2264 break; 2265 case offsetof(struct xdp_md, data_end): 2266 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2267 return -EOPNOTSUPP; 2268 emit_alu(nfp_prog, dst, 2269 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2270 break; 2271 default: 2272 return -EOPNOTSUPP; 2273 } 2274 2275 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2276 2277 return 0; 2278 } 2279 2280 static int 2281 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2282 unsigned int size) 2283 { 2284 swreg tmp_reg; 2285 2286 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2287 2288 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2289 tmp_reg, meta->insn.dst_reg * 2, size); 2290 } 2291 2292 static int 2293 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2294 unsigned int size) 2295 { 2296 swreg tmp_reg; 2297 2298 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2299 2300 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2301 tmp_reg, meta->insn.dst_reg * 2, size); 2302 } 2303 2304 static void 2305 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2306 struct nfp_insn_meta *meta) 2307 { 2308 s16 range_start = meta->pkt_cache.range_start; 2309 s16 range_end = meta->pkt_cache.range_end; 2310 swreg src_base, off; 2311 u8 xfer_num, len; 2312 bool indir; 2313 2314 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2315 src_base = reg_a(meta->insn.src_reg * 2); 2316 len = range_end - range_start; 2317 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2318 2319 indir = len > 8 * REG_WIDTH; 2320 /* Setup PREV_ALU for indirect mode. */ 2321 if (indir) 2322 wrp_immed(nfp_prog, reg_none(), 2323 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2324 2325 /* Cache memory into transfer-in registers. */ 2326 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2327 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2328 } 2329 2330 static int 2331 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2332 struct nfp_insn_meta *meta, 2333 unsigned int size) 2334 { 2335 s16 range_start = meta->pkt_cache.range_start; 2336 s16 insn_off = meta->insn.off - range_start; 2337 swreg dst_lo, dst_hi, src_lo, src_mid; 2338 u8 dst_gpr = meta->insn.dst_reg * 2; 2339 u8 len_lo = size, len_mid = 0; 2340 u8 idx = insn_off / REG_WIDTH; 2341 u8 off = insn_off % REG_WIDTH; 2342 2343 dst_hi = reg_both(dst_gpr + 1); 2344 dst_lo = reg_both(dst_gpr); 2345 src_lo = reg_xfer(idx); 2346 2347 /* The read length could involve as many as three registers. */ 2348 if (size > REG_WIDTH - off) { 2349 /* Calculate the part in the second register. */ 2350 len_lo = REG_WIDTH - off; 2351 len_mid = size - len_lo; 2352 2353 /* Calculate the part in the third register. 
*/ 2354 if (size > 2 * REG_WIDTH - off) 2355 len_mid = REG_WIDTH; 2356 } 2357 2358 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2359 2360 if (!len_mid) { 2361 wrp_immed(nfp_prog, dst_hi, 0); 2362 return 0; 2363 } 2364 2365 src_mid = reg_xfer(idx + 1); 2366 2367 if (size <= REG_WIDTH) { 2368 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2369 wrp_immed(nfp_prog, dst_hi, 0); 2370 } else { 2371 swreg src_hi = reg_xfer(idx + 2); 2372 2373 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2374 REG_WIDTH - len_lo, len_lo); 2375 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2376 REG_WIDTH - len_lo); 2377 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2378 len_lo); 2379 } 2380 2381 return 0; 2382 } 2383 2384 static int 2385 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2386 struct nfp_insn_meta *meta, 2387 unsigned int size) 2388 { 2389 swreg dst_lo, dst_hi, src_lo; 2390 u8 dst_gpr, idx; 2391 2392 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2393 dst_gpr = meta->insn.dst_reg * 2; 2394 dst_hi = reg_both(dst_gpr + 1); 2395 dst_lo = reg_both(dst_gpr); 2396 src_lo = reg_xfer(idx); 2397 2398 if (size < REG_WIDTH) { 2399 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2400 wrp_immed(nfp_prog, dst_hi, 0); 2401 } else if (size == REG_WIDTH) { 2402 wrp_mov(nfp_prog, dst_lo, src_lo); 2403 wrp_immed(nfp_prog, dst_hi, 0); 2404 } else { 2405 swreg src_hi = reg_xfer(idx + 1); 2406 2407 wrp_mov(nfp_prog, dst_lo, src_lo); 2408 wrp_mov(nfp_prog, dst_hi, src_hi); 2409 } 2410 2411 return 0; 2412 } 2413 2414 static int 2415 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2416 struct nfp_insn_meta *meta, unsigned int size) 2417 { 2418 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2419 2420 if (IS_ALIGNED(off, REG_WIDTH)) 2421 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2422 2423 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2424 } 2425 2426 static int 2427 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2428 unsigned int size) 2429 { 2430 if (meta->ldst_gather_len) 2431 return nfp_cpp_memcpy(nfp_prog, meta); 2432 2433 if (meta->ptr.type == PTR_TO_CTX) { 2434 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2435 return mem_ldx_xdp(nfp_prog, meta, size); 2436 else 2437 return mem_ldx_skb(nfp_prog, meta, size); 2438 } 2439 2440 if (meta->ptr.type == PTR_TO_PACKET) { 2441 if (meta->pkt_cache.range_end) { 2442 if (meta->pkt_cache.do_init) 2443 mem_ldx_data_init_pktcache(nfp_prog, meta); 2444 2445 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2446 } else { 2447 return mem_ldx_data(nfp_prog, meta, size); 2448 } 2449 } 2450 2451 if (meta->ptr.type == PTR_TO_STACK) 2452 return mem_ldx_stack(nfp_prog, meta, size, 2453 meta->ptr.off + meta->ptr.var_off.value); 2454 2455 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2456 return mem_ldx_emem(nfp_prog, meta, size); 2457 2458 return -EOPNOTSUPP; 2459 } 2460 2461 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2462 { 2463 return mem_ldx(nfp_prog, meta, 1); 2464 } 2465 2466 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2467 { 2468 return mem_ldx(nfp_prog, meta, 2); 2469 } 2470 2471 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2472 { 2473 return mem_ldx(nfp_prog, meta, 4); 2474 } 2475 2476 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2477 { 2478 return mem_ldx(nfp_prog, meta, 8); 2479 } 2480 2481 static int 2482 
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2483 unsigned int size) 2484 { 2485 u64 imm = meta->insn.imm; /* sign extend */ 2486 swreg off_reg; 2487 2488 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2489 2490 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2491 imm, size); 2492 } 2493 2494 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2495 unsigned int size) 2496 { 2497 if (meta->ptr.type == PTR_TO_PACKET) 2498 return mem_st_data(nfp_prog, meta, size); 2499 2500 return -EOPNOTSUPP; 2501 } 2502 2503 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2504 { 2505 return mem_st(nfp_prog, meta, 1); 2506 } 2507 2508 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2509 { 2510 return mem_st(nfp_prog, meta, 2); 2511 } 2512 2513 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2514 { 2515 return mem_st(nfp_prog, meta, 4); 2516 } 2517 2518 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2519 { 2520 return mem_st(nfp_prog, meta, 8); 2521 } 2522 2523 static int 2524 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2525 unsigned int size) 2526 { 2527 swreg off_reg; 2528 2529 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2530 2531 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2532 meta->insn.src_reg * 2, size); 2533 } 2534 2535 static int 2536 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2537 unsigned int size, unsigned int ptr_off) 2538 { 2539 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2540 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2541 false, wrp_lmem_store); 2542 } 2543 2544 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2545 { 2546 switch (meta->insn.off) { 2547 case offsetof(struct xdp_md, rx_queue_index): 2548 return nfp_queue_select(nfp_prog, meta); 2549 } 2550 2551 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2552 return -EOPNOTSUPP; 2553 } 2554 2555 static int 2556 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2557 unsigned int size) 2558 { 2559 if (meta->ptr.type == PTR_TO_PACKET) 2560 return mem_stx_data(nfp_prog, meta, size); 2561 2562 if (meta->ptr.type == PTR_TO_STACK) 2563 return mem_stx_stack(nfp_prog, meta, size, 2564 meta->ptr.off + meta->ptr.var_off.value); 2565 2566 return -EOPNOTSUPP; 2567 } 2568 2569 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2570 { 2571 return mem_stx(nfp_prog, meta, 1); 2572 } 2573 2574 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2575 { 2576 return mem_stx(nfp_prog, meta, 2); 2577 } 2578 2579 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2580 { 2581 if (meta->ptr.type == PTR_TO_CTX) 2582 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2583 return mem_stx_xdp(nfp_prog, meta); 2584 return mem_stx(nfp_prog, meta, 4); 2585 } 2586 2587 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2588 { 2589 return mem_stx(nfp_prog, meta, 8); 2590 } 2591 2592 static int 2593 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2594 { 2595 u8 dst_gpr = meta->insn.dst_reg * 2; 2596 u8 src_gpr = meta->insn.src_reg * 2; 2597 unsigned int full_add, out; 2598 swreg addra, addrb, off; 2599 2600 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2601 2602 /* We can fit 16 bits into 
the command immediate. If we know the immediate 2603 * is guaranteed to either always or never fit into 16 bits, we only 2604 * generate code to handle that particular case; otherwise we generate 2605 * code for both. 2606 */ 2607 out = nfp_prog_current_offset(nfp_prog); 2608 full_add = nfp_prog_current_offset(nfp_prog); 2609 2610 if (meta->insn.off) { 2611 out += 2; 2612 full_add += 2; 2613 } 2614 if (meta->xadd_maybe_16bit) { 2615 out += 3; 2616 full_add += 3; 2617 } 2618 if (meta->xadd_over_16bit) 2619 out += 2 + is64; 2620 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2621 out += 5; 2622 full_add += 5; 2623 } 2624 2625 /* Generate the branch for choosing add_imm vs add */ 2626 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2627 swreg max_imm = imm_a(nfp_prog); 2628 2629 wrp_immed(nfp_prog, max_imm, 0xffff); 2630 emit_alu(nfp_prog, reg_none(), 2631 max_imm, ALU_OP_SUB, reg_b(src_gpr)); 2632 emit_alu(nfp_prog, reg_none(), 2633 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); 2634 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); 2635 /* defer for add */ 2636 } 2637 2638 /* If the insn has an offset, add it to the address. */ 2639 if (!meta->insn.off) { 2640 addra = reg_a(dst_gpr); 2641 addrb = reg_b(dst_gpr + 1); 2642 } else { 2643 emit_alu(nfp_prog, imma_a(nfp_prog), 2644 reg_a(dst_gpr), ALU_OP_ADD, off); 2645 emit_alu(nfp_prog, imma_b(nfp_prog), 2646 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); 2647 addra = imma_a(nfp_prog); 2648 addrb = imma_b(nfp_prog); 2649 } 2650 2651 /* Generate the add_imm if 16 bits are possible */ 2652 if (meta->xadd_maybe_16bit) { 2653 swreg prev_alu = imm_a(nfp_prog); 2654 2655 wrp_immed(nfp_prog, prev_alu, 2656 FIELD_PREP(CMD_OVE_DATA, 2) | 2657 CMD_OVE_LEN | 2658 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); 2659 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); 2660 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, 2661 addra, addrb, 0, CMD_CTX_NO_SWAP); 2662 2663 if (meta->xadd_over_16bit) 2664 emit_br(nfp_prog, BR_UNC, out, 0); 2665 } 2666 2667 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) 2668 return -EINVAL; 2669 2670 /* Generate the add if 16 bits are not guaranteed */ 2671 if (meta->xadd_over_16bit) { 2672 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, 2673 addra, addrb, is64 << 2, 2674 is64 ?
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 2675 2676 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 2677 if (is64) 2678 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 2679 } 2680 2681 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 2682 return -EINVAL; 2683 2684 return 0; 2685 } 2686 2687 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2688 { 2689 return mem_xadd(nfp_prog, meta, false); 2690 } 2691 2692 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2693 { 2694 return mem_xadd(nfp_prog, meta, true); 2695 } 2696 2697 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2698 { 2699 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 2700 2701 return 0; 2702 } 2703 2704 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2705 { 2706 const struct bpf_insn *insn = &meta->insn; 2707 u64 imm = insn->imm; /* sign extend */ 2708 swreg or1, or2, tmp_reg; 2709 2710 or1 = reg_a(insn->dst_reg * 2); 2711 or2 = reg_b(insn->dst_reg * 2 + 1); 2712 2713 if (imm & ~0U) { 2714 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2715 emit_alu(nfp_prog, imm_a(nfp_prog), 2716 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2717 or1 = imm_a(nfp_prog); 2718 } 2719 2720 if (imm >> 32) { 2721 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2722 emit_alu(nfp_prog, imm_b(nfp_prog), 2723 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2724 or2 = imm_b(nfp_prog); 2725 } 2726 2727 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 2728 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2729 2730 return 0; 2731 } 2732 2733 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2734 { 2735 const struct bpf_insn *insn = &meta->insn; 2736 u64 imm = insn->imm; /* sign extend */ 2737 swreg tmp_reg; 2738 2739 if (!imm) { 2740 meta->skip = true; 2741 return 0; 2742 } 2743 2744 if (imm & ~0U) { 2745 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2746 emit_alu(nfp_prog, reg_none(), 2747 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg); 2748 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2749 } 2750 2751 if (imm >> 32) { 2752 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2753 emit_alu(nfp_prog, reg_none(), 2754 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg); 2755 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2756 } 2757 2758 return 0; 2759 } 2760 2761 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2762 { 2763 const struct bpf_insn *insn = &meta->insn; 2764 u64 imm = insn->imm; /* sign extend */ 2765 swreg tmp_reg; 2766 2767 if (!imm) { 2768 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 2769 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 2770 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2771 return 0; 2772 } 2773 2774 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2775 emit_alu(nfp_prog, reg_none(), 2776 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2777 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2778 2779 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2780 emit_alu(nfp_prog, reg_none(), 2781 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2782 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2783 2784 return 0; 2785 } 2786 2787 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2788 { 2789 const struct bpf_insn *insn = &meta->insn; 2790 2791 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 2792 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 2793 emit_alu(nfp_prog, 
imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), 2794 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); 2795 emit_alu(nfp_prog, reg_none(), 2796 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); 2797 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2798 2799 return 0; 2800 } 2801 2802 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2803 { 2804 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 2805 } 2806 2807 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2808 { 2809 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 2810 } 2811 2812 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2813 { 2814 switch (meta->insn.imm) { 2815 case BPF_FUNC_xdp_adjust_head: 2816 return adjust_head(nfp_prog, meta); 2817 case BPF_FUNC_map_lookup_elem: 2818 case BPF_FUNC_map_update_elem: 2819 case BPF_FUNC_map_delete_elem: 2820 return map_call_stack_common(nfp_prog, meta); 2821 case BPF_FUNC_get_prandom_u32: 2822 return nfp_get_prandom_u32(nfp_prog, meta); 2823 case BPF_FUNC_perf_event_output: 2824 return nfp_perf_event_output(nfp_prog, meta); 2825 default: 2826 WARN_ONCE(1, "verifier allowed unsupported function\n"); 2827 return -EOPNOTSUPP; 2828 } 2829 } 2830 2831 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2832 { 2833 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); 2834 2835 return 0; 2836 } 2837 2838 static const instr_cb_t instr_cb[256] = { 2839 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 2840 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 2841 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 2842 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 2843 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 2844 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 2845 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 2846 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 2847 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 2848 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 2849 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 2850 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 2851 [BPF_ALU64 | BPF_NEG] = neg_reg64, 2852 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, 2853 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 2854 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, 2855 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 2856 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64, 2857 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, 2858 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 2859 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 2860 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 2861 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 2862 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 2863 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 2864 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 2865 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 2866 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 2867 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 2868 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 2869 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 2870 [BPF_ALU | BPF_NEG] = neg_reg, 2871 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 2872 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 2873 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 2874 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 2875 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 2876 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 2877 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 2878 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 2879 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, 2880 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 2881 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 2882 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 2883 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 2884 [BPF_STX | BPF_MEM | BPF_B] = 
mem_stx1, 2885 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 2886 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 2887 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 2888 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, 2889 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8, 2890 [BPF_ST | BPF_MEM | BPF_B] = mem_st1, 2891 [BPF_ST | BPF_MEM | BPF_H] = mem_st2, 2892 [BPF_ST | BPF_MEM | BPF_W] = mem_st4, 2893 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, 2894 [BPF_JMP | BPF_JA | BPF_K] = jump, 2895 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, 2896 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, 2897 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, 2898 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, 2899 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, 2900 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, 2901 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, 2902 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, 2903 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, 2904 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 2905 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 2906 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 2907 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, 2908 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, 2909 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, 2910 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, 2911 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, 2912 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, 2913 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, 2914 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, 2915 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 2916 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 2917 [BPF_JMP | BPF_CALL] = call, 2918 [BPF_JMP | BPF_EXIT] = goto_out, 2919 }; 2920 2921 /* --- Assembler logic --- */ 2922 static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 2923 { 2924 struct nfp_insn_meta *meta, *jmp_dst; 2925 u32 idx, br_idx; 2926 2927 list_for_each_entry(meta, &nfp_prog->insns, l) { 2928 if (meta->skip) 2929 continue; 2930 if (meta->insn.code == (BPF_JMP | BPF_CALL)) 2931 continue; 2932 if (BPF_CLASS(meta->insn.code) != BPF_JMP) 2933 continue; 2934 2935 if (list_is_last(&meta->l, &nfp_prog->insns)) 2936 br_idx = nfp_prog->last_bpf_off; 2937 else 2938 br_idx = list_next_entry(meta, l)->off - 1; 2939 2940 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 2941 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 2942 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 2943 return -ELOOP; 2944 } 2945 /* Leave special branches for later */ 2946 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 2947 RELO_BR_REL) 2948 continue; 2949 2950 if (!meta->jmp_dst) { 2951 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 2952 return -ELOOP; 2953 } 2954 2955 jmp_dst = meta->jmp_dst; 2956 2957 if (jmp_dst->skip) { 2958 pr_err("Branch landing on removed instruction!!\n"); 2959 return -ELOOP; 2960 } 2961 2962 for (idx = meta->off; idx <= br_idx; idx++) { 2963 if (!nfp_is_br(nfp_prog->prog[idx])) 2964 continue; 2965 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 2966 } 2967 } 2968 2969 return 0; 2970 } 2971 2972 static void nfp_intro(struct nfp_prog *nfp_prog) 2973 { 2974 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 2975 emit_alu(nfp_prog, plen_reg(nfp_prog), 2976 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 2977 } 2978 2979 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 2980 { 2981 /* TC direct-action mode: 2982 * 0,1 ok NOT SUPPORTED[1] 2983 * 2 drop 0x22 -> drop, count as stat1 2984 * 4,5 nuke 0x02 -> drop 2985 * 7 redir 0x44 -> redir, count as stat2 2986 * * unspec 0x11 -> pass, count as stat0 2987 * 2988 * [1] We can't support OK and RECLASSIFY because we can't tell TC 2989 * the exact decision made. 
We are forced to support UNSPEC 2990 * to handle aborts so that's the only one we handle for passing 2991 * packets up the stack. 2992 */ 2993 /* Target for aborts */ 2994 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2995 2996 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2997 2998 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2999 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 3000 3001 /* Target for normal exits */ 3002 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3003 3004 /* if R0 > 7 jump to abort */ 3005 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 3006 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3007 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3008 3009 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 3010 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 3011 3012 emit_shf(nfp_prog, reg_a(1), 3013 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 3014 3015 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3016 emit_shf(nfp_prog, reg_a(2), 3017 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3018 3019 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3020 emit_shf(nfp_prog, reg_b(2), 3021 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 3022 3023 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3024 3025 emit_shf(nfp_prog, reg_b(2), 3026 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 3027 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3028 } 3029 3030 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 3031 { 3032 /* XDP return codes: 3033 * 0 aborted 0x82 -> drop, count as stat3 3034 * 1 drop 0x22 -> drop, count as stat1 3035 * 2 pass 0x11 -> pass, count as stat0 3036 * 3 tx 0x44 -> redir, count as stat2 3037 * * unknown 0x82 -> drop, count as stat3 3038 */ 3039 /* Target for aborts */ 3040 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3041 3042 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3043 3044 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3045 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 3046 3047 /* Target for normal exits */ 3048 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3049 3050 /* if R0 > 3 jump to abort */ 3051 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 3052 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3053 3054 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 3055 3056 emit_shf(nfp_prog, reg_a(1), 3057 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 3058 3059 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3060 emit_shf(nfp_prog, reg_b(2), 3061 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3062 3063 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3064 3065 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3066 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3067 } 3068 3069 static void nfp_outro(struct nfp_prog *nfp_prog) 3070 { 3071 switch (nfp_prog->type) { 3072 case BPF_PROG_TYPE_SCHED_CLS: 3073 nfp_outro_tc_da(nfp_prog); 3074 break; 3075 case BPF_PROG_TYPE_XDP: 3076 nfp_outro_xdp(nfp_prog); 3077 break; 3078 default: 3079 WARN_ON(1); 3080 } 3081 } 3082 3083 static int nfp_translate(struct nfp_prog *nfp_prog) 3084 { 3085 struct nfp_insn_meta *meta; 3086 int err; 3087 3088 nfp_intro(nfp_prog); 3089 if (nfp_prog->error) 3090 return nfp_prog->error; 3091 3092 list_for_each_entry(meta, &nfp_prog->insns, l) { 3093 instr_cb_t cb = 
instr_cb[meta->insn.code]; 3094 3095 meta->off = nfp_prog_current_offset(nfp_prog); 3096 3097 if (meta->skip) { 3098 nfp_prog->n_translated++; 3099 continue; 3100 } 3101 3102 if (nfp_meta_has_prev(nfp_prog, meta) && 3103 nfp_meta_prev(meta)->double_cb) 3104 cb = nfp_meta_prev(meta)->double_cb; 3105 if (!cb) 3106 return -ENOENT; 3107 err = cb(nfp_prog, meta); 3108 if (err) 3109 return err; 3110 if (nfp_prog->error) 3111 return nfp_prog->error; 3112 3113 nfp_prog->n_translated++; 3114 } 3115 3116 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1; 3117 3118 nfp_outro(nfp_prog); 3119 if (nfp_prog->error) 3120 return nfp_prog->error; 3121 3122 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); 3123 if (nfp_prog->error) 3124 return nfp_prog->error; 3125 3126 return nfp_fixup_branches(nfp_prog); 3127 } 3128 3129 /* --- Optimizations --- */ 3130 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 3131 { 3132 struct nfp_insn_meta *meta; 3133 3134 list_for_each_entry(meta, &nfp_prog->insns, l) { 3135 struct bpf_insn insn = meta->insn; 3136 3137 /* Programs converted from cBPF start with register xoring */ 3138 if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) && 3139 insn.src_reg == insn.dst_reg) 3140 continue; 3141 3142 /* Programs start with R6 = R1 but we ignore the skb pointer */ 3143 if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) && 3144 insn.src_reg == 1 && insn.dst_reg == 6) 3145 meta->skip = true; 3146 3147 /* Return as soon as something doesn't match */ 3148 if (!meta->skip) 3149 return; 3150 } 3151 } 3152 3153 /* abs(insn.imm) will fit better into unrestricted reg immediate - 3154 * convert add/sub of a negative number into a sub/add of a positive one. 3155 */ 3156 static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) 3157 { 3158 struct nfp_insn_meta *meta; 3159 3160 list_for_each_entry(meta, &nfp_prog->insns, l) { 3161 struct bpf_insn insn = meta->insn; 3162 3163 if (meta->skip) 3164 continue; 3165 3166 if (BPF_CLASS(insn.code) != BPF_ALU && 3167 BPF_CLASS(insn.code) != BPF_ALU64 && 3168 BPF_CLASS(insn.code) != BPF_JMP) 3169 continue; 3170 if (BPF_SRC(insn.code) != BPF_K) 3171 continue; 3172 if (insn.imm >= 0) 3173 continue; 3174 3175 if (BPF_CLASS(insn.code) == BPF_JMP) { 3176 switch (BPF_OP(insn.code)) { 3177 case BPF_JGE: 3178 case BPF_JSGE: 3179 case BPF_JLT: 3180 case BPF_JSLT: 3181 meta->jump_neg_op = true; 3182 break; 3183 default: 3184 continue; 3185 } 3186 } else { 3187 if (BPF_OP(insn.code) == BPF_ADD) 3188 insn.code = BPF_CLASS(insn.code) | BPF_SUB; 3189 else if (BPF_OP(insn.code) == BPF_SUB) 3190 insn.code = BPF_CLASS(insn.code) | BPF_ADD; 3191 else 3192 continue; 3193 3194 meta->insn.code = insn.code | BPF_K; 3195 } 3196 3197 meta->insn.imm = -insn.imm; 3198 } 3199 } 3200 3201 /* Remove masking after load since our load guarantees this is not needed */ 3202 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) 3203 { 3204 struct nfp_insn_meta *meta1, *meta2; 3205 const s32 exp_mask[] = { 3206 [BPF_B] = 0x000000ffU, 3207 [BPF_H] = 0x0000ffffU, 3208 [BPF_W] = 0xffffffffU, 3209 }; 3210 3211 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3212 struct bpf_insn insn, next; 3213 3214 insn = meta1->insn; 3215 next = meta2->insn; 3216 3217 if (BPF_CLASS(insn.code) != BPF_LD) 3218 continue; 3219 if (BPF_MODE(insn.code) != BPF_ABS && 3220 BPF_MODE(insn.code) != BPF_IND) 3221 continue; 3222 3223 if (next.code != (BPF_ALU64 | BPF_AND | BPF_K)) 3224 continue; 3225 3226 if (!exp_mask[BPF_SIZE(insn.code)]) 3227 continue; 3228 if (exp_mask[BPF_SIZE(insn.code)] 
!= next.imm) 3229 continue; 3230 3231 if (next.src_reg || next.dst_reg) 3232 continue; 3233 3234 if (meta2->flags & FLAG_INSN_IS_JUMP_DST) 3235 continue; 3236 3237 meta2->skip = true; 3238 } 3239 } 3240 3241 static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog) 3242 { 3243 struct nfp_insn_meta *meta1, *meta2, *meta3; 3244 3245 nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) { 3246 struct bpf_insn insn, next1, next2; 3247 3248 insn = meta1->insn; 3249 next1 = meta2->insn; 3250 next2 = meta3->insn; 3251 3252 if (BPF_CLASS(insn.code) != BPF_LD) 3253 continue; 3254 if (BPF_MODE(insn.code) != BPF_ABS && 3255 BPF_MODE(insn.code) != BPF_IND) 3256 continue; 3257 if (BPF_SIZE(insn.code) != BPF_W) 3258 continue; 3259 3260 if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) && 3261 next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) && 3262 !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) && 3263 next2.code == (BPF_LSH | BPF_K | BPF_ALU64))) 3264 continue; 3265 3266 if (next1.src_reg || next1.dst_reg || 3267 next2.src_reg || next2.dst_reg) 3268 continue; 3269 3270 if (next1.imm != 0x20 || next2.imm != 0x20) 3271 continue; 3272 3273 if (meta2->flags & FLAG_INSN_IS_JUMP_DST || 3274 meta3->flags & FLAG_INSN_IS_JUMP_DST) 3275 continue; 3276 3277 meta2->skip = true; 3278 meta3->skip = true; 3279 } 3280 } 3281 3282 /* A load/store pair that forms a memory copy should look like the following: 3283 * 3284 * ld_width R, [addr_src + offset_src] 3285 * st_width [addr_dest + offset_dest], R 3286 * 3287 * The destination register of the load and the source register of the store 3288 * should be the same, and the load and store should also operate at the same width. 3289 * If either addr_src or addr_dest is the stack pointer, we don't do the 3290 * CPP optimization as the stack is modelled by registers on the NFP. 3291 */ 3292 static bool 3293 curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta, 3294 struct nfp_insn_meta *st_meta) 3295 { 3296 struct bpf_insn *ld = &ld_meta->insn; 3297 struct bpf_insn *st = &st_meta->insn; 3298 3299 if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta)) 3300 return false; 3301 3302 if (ld_meta->ptr.type != PTR_TO_PACKET) 3303 return false; 3304 3305 if (st_meta->ptr.type != PTR_TO_PACKET) 3306 return false; 3307 3308 if (BPF_SIZE(ld->code) != BPF_SIZE(st->code)) 3309 return false; 3310 3311 if (ld->dst_reg != st->src_reg) 3312 return false; 3313 3314 /* There is a jump to the store insn in this pair. */ 3315 if (st_meta->flags & FLAG_INSN_IS_JUMP_DST) 3316 return false; 3317 3318 return true; 3319 } 3320 3321 /* Currently, we only support chaining load/store pairs if: 3322 * 3323 * - Their address base registers are the same. 3324 * - Their address offsets are in the same order. 3325 * - They operate at the same memory width. 3326 * - There is no jump into the middle of them. 3327 */ 3328 static bool 3329 curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta, 3330 struct nfp_insn_meta *st_meta, 3331 struct bpf_insn *prev_ld, 3332 struct bpf_insn *prev_st) 3333 { 3334 u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst; 3335 struct bpf_insn *ld = &ld_meta->insn; 3336 struct bpf_insn *st = &st_meta->insn; 3337 s16 prev_ld_off, prev_st_off; 3338 3339 /* This pair is the start pair.
*/ 3340 if (!prev_ld) 3341 return true; 3342 3343 prev_size = BPF_LDST_BYTES(prev_ld); 3344 curr_size = BPF_LDST_BYTES(ld); 3345 prev_ld_base = prev_ld->src_reg; 3346 prev_st_base = prev_st->dst_reg; 3347 prev_ld_dst = prev_ld->dst_reg; 3348 prev_ld_off = prev_ld->off; 3349 prev_st_off = prev_st->off; 3350 3351 if (ld->dst_reg != prev_ld_dst) 3352 return false; 3353 3354 if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base) 3355 return false; 3356 3357 if (curr_size != prev_size) 3358 return false; 3359 3360 /* There is a jump to the head of this pair. */ 3361 if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST) 3362 return false; 3363 3364 /* Both in ascending order. */ 3365 if (prev_ld_off + prev_size == ld->off && 3366 prev_st_off + prev_size == st->off) 3367 return true; 3368 3369 /* Both in descending order. */ 3370 if (ld->off + curr_size == prev_ld_off && 3371 st->off + curr_size == prev_st_off) 3372 return true; 3373 3374 return false; 3375 } 3376 3377 /* Return TRUE if a cross memory access happens, i.e. the store area overlaps 3378 * the load area such that a later load might read the value written by a 3379 * previous store; in that case we can't treat the sequence 3380 * as a memory copy. 3381 */ 3382 static bool 3383 cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta, 3384 struct nfp_insn_meta *head_st_meta) 3385 { 3386 s16 head_ld_off, head_st_off, ld_off; 3387 3388 /* Different pointer types do not overlap. */ 3389 if (head_ld_meta->ptr.type != head_st_meta->ptr.type) 3390 return false; 3391 3392 /* Load and store are both PTR_TO_PACKET, check ID info. */ 3393 if (head_ld_meta->ptr.id != head_st_meta->ptr.id) 3394 return true; 3395 3396 /* Canonicalize the offsets. Express all of them relative to the original 3397 * base register. 3398 */ 3399 head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off; 3400 head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off; 3401 ld_off = ld->off + head_ld_meta->ptr.off; 3402 3403 /* Ascending order cross. */ 3404 if (ld_off > head_ld_off && 3405 head_ld_off < head_st_off && ld_off >= head_st_off) 3406 return true; 3407 3408 /* Descending order cross. */ 3409 if (ld_off < head_ld_off && 3410 head_ld_off > head_st_off && ld_off <= head_st_off) 3411 return true; 3412 3413 return false; 3414 } 3415 3416 /* This pass tries to identify the following instruction sequences. 3417 * 3418 * load R, [regA + offA] 3419 * store [regB + offB], R 3420 * load R, [regA + offA + const_imm_A] 3421 * store [regB + offB + const_imm_A], R 3422 * load R, [regA + offA + 2 * const_imm_A] 3423 * store [regB + offB + 2 * const_imm_A], R 3424 * ... 3425 * 3426 * The above sequence is typically generated by the compiler when lowering 3427 * a memcpy. The NFP prefers using CPP instructions to accelerate it. 3428 */ 3429 static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) 3430 { 3431 struct nfp_insn_meta *head_ld_meta = NULL; 3432 struct nfp_insn_meta *head_st_meta = NULL; 3433 struct nfp_insn_meta *meta1, *meta2; 3434 struct bpf_insn *prev_ld = NULL; 3435 struct bpf_insn *prev_st = NULL; 3436 u8 count = 0; 3437 3438 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3439 struct bpf_insn *ld = &meta1->insn; 3440 struct bpf_insn *st = &meta2->insn; 3441 3442 /* Reset the record status if any of the following is true: 3443 * - The current insn pair is not load/store. 3444 * - The load/store pair doesn't chain with the previous one. 3445 * - The chained load/store pair has a cross memory access with the previous pair.
3446 * - The chained load/store pairs have a total memory copy 3447 * size beyond 128 bytes, which is the maximum length a 3448 * single NFP CPP command can transfer. 3449 */ 3450 if (!curr_pair_is_memcpy(meta1, meta2) || 3451 !curr_pair_chain_with_previous(meta1, meta2, prev_ld, 3452 prev_st) || 3453 (head_ld_meta && (cross_mem_access(ld, head_ld_meta, 3454 head_st_meta) || 3455 head_ld_meta->ldst_gather_len >= 128))) { 3456 if (!count) 3457 continue; 3458 3459 if (count > 1) { 3460 s16 prev_ld_off = prev_ld->off; 3461 s16 prev_st_off = prev_st->off; 3462 s16 head_ld_off = head_ld_meta->insn.off; 3463 3464 if (prev_ld_off < head_ld_off) { 3465 head_ld_meta->insn.off = prev_ld_off; 3466 head_st_meta->insn.off = prev_st_off; 3467 head_ld_meta->ldst_gather_len = 3468 -head_ld_meta->ldst_gather_len; 3469 } 3470 3471 head_ld_meta->paired_st = &head_st_meta->insn; 3472 head_st_meta->skip = true; 3473 } else { 3474 head_ld_meta->ldst_gather_len = 0; 3475 } 3476 3477 /* If the chain is ended by a load/store pair then this 3478 * could serve as the new head of the next chain. 3479 */ 3480 if (curr_pair_is_memcpy(meta1, meta2)) { 3481 head_ld_meta = meta1; 3482 head_st_meta = meta2; 3483 head_ld_meta->ldst_gather_len = 3484 BPF_LDST_BYTES(ld); 3485 meta1 = nfp_meta_next(meta1); 3486 meta2 = nfp_meta_next(meta2); 3487 prev_ld = ld; 3488 prev_st = st; 3489 count = 1; 3490 } else { 3491 head_ld_meta = NULL; 3492 head_st_meta = NULL; 3493 prev_ld = NULL; 3494 prev_st = NULL; 3495 count = 0; 3496 } 3497 3498 continue; 3499 } 3500 3501 if (!head_ld_meta) { 3502 head_ld_meta = meta1; 3503 head_st_meta = meta2; 3504 } else { 3505 meta1->skip = true; 3506 meta2->skip = true; 3507 } 3508 3509 head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld); 3510 meta1 = nfp_meta_next(meta1); 3511 meta2 = nfp_meta_next(meta2); 3512 prev_ld = ld; 3513 prev_st = st; 3514 count++; 3515 } 3516 } 3517 3518 static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog) 3519 { 3520 struct nfp_insn_meta *meta, *range_node = NULL; 3521 s16 range_start = 0, range_end = 0; 3522 bool cache_avail = false; 3523 struct bpf_insn *insn; 3524 s32 range_ptr_off = 0; 3525 u32 range_ptr_id = 0; 3526 3527 list_for_each_entry(meta, &nfp_prog->insns, l) { 3528 if (meta->flags & FLAG_INSN_IS_JUMP_DST) 3529 cache_avail = false; 3530 3531 if (meta->skip) 3532 continue; 3533 3534 insn = &meta->insn; 3535 3536 if (is_mbpf_store_pkt(meta) || 3537 insn->code == (BPF_JMP | BPF_CALL) || 3538 is_mbpf_classic_store_pkt(meta) || 3539 is_mbpf_classic_load(meta)) { 3540 cache_avail = false; 3541 continue; 3542 } 3543 3544 if (!is_mbpf_load(meta)) 3545 continue; 3546 3547 if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) { 3548 cache_avail = false; 3549 continue; 3550 } 3551 3552 if (!cache_avail) { 3553 cache_avail = true; 3554 if (range_node) 3555 goto end_current_then_start_new; 3556 goto start_new; 3557 } 3558 3559 /* Check ID to make sure two reads share the same 3560 * variable offset against PTR_TO_PACKET, and check OFF 3561 * to make sure they also share the same constant 3562 * offset. 3563 * 3564 * OFFs don't really need to be the same, because they 3565 * are the constant offsets against PTR_TO_PACKET, so 3566 * for different OFFs, we could canonicalize them to 3567 * offsets against the original packet pointer. We don't 3568 * support this.
3569 */ 3570 if (meta->ptr.id == range_ptr_id && 3571 meta->ptr.off == range_ptr_off) { 3572 s16 new_start = range_start; 3573 s16 end, off = insn->off; 3574 s16 new_end = range_end; 3575 bool changed = false; 3576 3577 if (off < range_start) { 3578 new_start = off; 3579 changed = true; 3580 } 3581 3582 end = off + BPF_LDST_BYTES(insn); 3583 if (end > range_end) { 3584 new_end = end; 3585 changed = true; 3586 } 3587 3588 if (!changed) 3589 continue; 3590 3591 if (new_end - new_start <= 64) { 3592 /* Install new range. */ 3593 range_start = new_start; 3594 range_end = new_end; 3595 continue; 3596 } 3597 } 3598 3599 end_current_then_start_new: 3600 range_node->pkt_cache.range_start = range_start; 3601 range_node->pkt_cache.range_end = range_end; 3602 start_new: 3603 range_node = meta; 3604 range_node->pkt_cache.do_init = true; 3605 range_ptr_id = range_node->ptr.id; 3606 range_ptr_off = range_node->ptr.off; 3607 range_start = insn->off; 3608 range_end = insn->off + BPF_LDST_BYTES(insn); 3609 } 3610 3611 if (range_node) { 3612 range_node->pkt_cache.range_start = range_start; 3613 range_node->pkt_cache.range_end = range_end; 3614 } 3615 3616 list_for_each_entry(meta, &nfp_prog->insns, l) { 3617 if (meta->skip) 3618 continue; 3619 3620 if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { 3621 if (meta->pkt_cache.do_init) { 3622 range_start = meta->pkt_cache.range_start; 3623 range_end = meta->pkt_cache.range_end; 3624 } else { 3625 meta->pkt_cache.range_start = range_start; 3626 meta->pkt_cache.range_end = range_end; 3627 } 3628 } 3629 } 3630 } 3631 3632 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) 3633 { 3634 nfp_bpf_opt_reg_init(nfp_prog); 3635 3636 nfp_bpf_opt_neg_add_sub(nfp_prog); 3637 nfp_bpf_opt_ld_mask(nfp_prog); 3638 nfp_bpf_opt_ld_shift(nfp_prog); 3639 nfp_bpf_opt_ldst_gather(nfp_prog); 3640 nfp_bpf_opt_pkt_cache(nfp_prog); 3641 3642 return 0; 3643 } 3644 3645 static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) 3646 { 3647 struct nfp_insn_meta *meta1, *meta2; 3648 struct nfp_bpf_map *nfp_map; 3649 struct bpf_map *map; 3650 3651 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3652 if (meta1->skip || meta2->skip) 3653 continue; 3654 3655 if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) || 3656 meta1->insn.src_reg != BPF_PSEUDO_MAP_FD) 3657 continue; 3658 3659 map = (void *)(unsigned long)((u32)meta1->insn.imm | 3660 (u64)meta2->insn.imm << 32); 3661 if (bpf_map_offload_neutral(map)) 3662 continue; 3663 nfp_map = map_to_offmap(map)->dev_priv; 3664 3665 meta1->insn.imm = nfp_map->tid; 3666 meta2->insn.imm = 0; 3667 } 3668 3669 return 0; 3670 } 3671 3672 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) 3673 { 3674 __le64 *ustore = (__force __le64 *)prog; 3675 int i; 3676 3677 for (i = 0; i < len; i++) { 3678 int err; 3679 3680 err = nfp_ustore_check_valid_no_ecc(prog[i]); 3681 if (err) 3682 return err; 3683 3684 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i])); 3685 } 3686 3687 return 0; 3688 } 3689 3690 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog) 3691 { 3692 void *prog; 3693 3694 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL); 3695 if (!prog) 3696 return; 3697 3698 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64); 3699 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len); 3700 kvfree(nfp_prog->prog); 3701 nfp_prog->prog = prog; 3702 } 3703 3704 int nfp_bpf_jit(struct nfp_prog *nfp_prog) 3705 { 3706 int ret; 3707 3708 ret = nfp_bpf_replace_map_ptrs(nfp_prog); 3709 if (ret) 3710 return ret; 
3711 3712 ret = nfp_bpf_optimize(nfp_prog); 3713 if (ret) 3714 return ret; 3715 3716 ret = nfp_translate(nfp_prog); 3717 if (ret) { 3718 pr_err("Translation failed with error %d (translated: %u)\n", 3719 ret, nfp_prog->n_translated); 3720 return -EINVAL; 3721 } 3722 3723 nfp_bpf_prog_trim(nfp_prog); 3724 3725 return ret; 3726 } 3727 3728 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) 3729 { 3730 struct nfp_insn_meta *meta; 3731 3732 /* Another pass to record jump information. */ 3733 list_for_each_entry(meta, &nfp_prog->insns, l) { 3734 u64 code = meta->insn.code; 3735 3736 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT && 3737 BPF_OP(code) != BPF_CALL) { 3738 struct nfp_insn_meta *dst_meta; 3739 unsigned short dst_indx; 3740 3741 dst_indx = meta->n + 1 + meta->insn.off; 3742 dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx, 3743 cnt); 3744 3745 meta->jmp_dst = dst_meta; 3746 dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; 3747 } 3748 } 3749 } 3750 3751 bool nfp_bpf_supported_opcode(u8 code) 3752 { 3753 return !!instr_cb[code]; 3754 } 3755 3756 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) 3757 { 3758 unsigned int i; 3759 u64 *prog; 3760 int err; 3761 3762 prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64), 3763 GFP_KERNEL); 3764 if (!prog) 3765 return ERR_PTR(-ENOMEM); 3766 3767 for (i = 0; i < nfp_prog->prog_len; i++) { 3768 enum nfp_relo_type special; 3769 u32 val; 3770 3771 special = FIELD_GET(OP_RELO_TYPE, prog[i]); 3772 switch (special) { 3773 case RELO_NONE: 3774 continue; 3775 case RELO_BR_REL: 3776 br_add_offset(&prog[i], bv->start_off); 3777 break; 3778 case RELO_BR_GO_OUT: 3779 br_set_offset(&prog[i], 3780 nfp_prog->tgt_out + bv->start_off); 3781 break; 3782 case RELO_BR_GO_ABORT: 3783 br_set_offset(&prog[i], 3784 nfp_prog->tgt_abort + bv->start_off); 3785 break; 3786 case RELO_BR_NEXT_PKT: 3787 br_set_offset(&prog[i], bv->tgt_done); 3788 break; 3789 case RELO_BR_HELPER: 3790 val = br_get_offset(prog[i]); 3791 val -= BR_OFF_RELO; 3792 switch (val) { 3793 case BPF_FUNC_map_lookup_elem: 3794 val = nfp_prog->bpf->helpers.map_lookup; 3795 break; 3796 case BPF_FUNC_map_update_elem: 3797 val = nfp_prog->bpf->helpers.map_update; 3798 break; 3799 case BPF_FUNC_map_delete_elem: 3800 val = nfp_prog->bpf->helpers.map_delete; 3801 break; 3802 case BPF_FUNC_perf_event_output: 3803 val = nfp_prog->bpf->helpers.perf_event_output; 3804 break; 3805 default: 3806 pr_err("relocation of unknown helper %d\n", 3807 val); 3808 err = -EINVAL; 3809 goto err_free_prog; 3810 } 3811 br_set_offset(&prog[i], val); 3812 break; 3813 case RELO_IMMED_REL: 3814 immed_add_value(&prog[i], bv->start_off); 3815 break; 3816 } 3817 3818 prog[i] &= ~OP_RELO_TYPE; 3819 } 3820 3821 err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len); 3822 if (err) 3823 goto err_free_prog; 3824 3825 return prog; 3826 3827 err_free_prog: 3828 kfree(prog); 3829 return ERR_PTR(err); 3830 } 3831
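/* Usage sketch (an assumption about the caller, not code from this file):
 * the offload path is expected to relocate the translated image once per
 * vNIC before writing it to the code store, along the lines of
 *
 *	void *img;
 *
 *	img = nfp_bpf_relo_for_vnic(nfp_prog, bv);
 *	if (IS_ERR(img))
 *		return PTR_ERR(img);
 *
 * followed by a write of nfp_prog->prog_len * sizeof(u64) bytes of img to
 * the microcode store (ECC has already been folded in by
 * nfp_bpf_ustore_calc()) and a kfree() of img once the transfer completes;
 * bv here stands for the struct nfp_bpf_vnic describing the target vNIC.
 */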