1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (C) 2018 Universita` di Pisa
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/types.h>
31 #include <sys/stat.h>
32 #include <sys/ioctl.h>
33 #include <sys/mman.h>
34 #include <fcntl.h>
35 #include <inttypes.h>
36 #include <stdlib.h>
37 #include <stdio.h>
38 #include <stdarg.h>
39 #include <string.h>
40 #include <unistd.h>
41 #include <errno.h>
42 #include <net/netmap_user.h>
43 #define LIBNETMAP_NOTHREADSAFE
44 #include "libnetmap.h"
45
/*
 * One entry in a port's LIFO list of cleanup actions.  Entries are pushed
 * by the functions that add options/resources to the port and are run (and
 * freed) in reverse order by nmport_do_cleanup().
 */
struct nmport_cleanup_d {
	struct nmport_cleanup_d *next;	/* next (older) entry in the list */
	void (*cleanup)(struct nmport_cleanup_d *, struct nmport_d *);
};
50
51 static void
nmport_push_cleanup(struct nmport_d * d,struct nmport_cleanup_d * c)52 nmport_push_cleanup(struct nmport_d *d, struct nmport_cleanup_d *c)
53 {
54 c->next = d->clist;
55 d->clist = c;
56 }
57
58 static void
nmport_pop_cleanup(struct nmport_d * d)59 nmport_pop_cleanup(struct nmport_d *d)
60 {
61 struct nmport_cleanup_d *top;
62
63 top = d->clist;
64 d->clist = d->clist->next;
65 (*top->cleanup)(top, d);
66 nmctx_free(d->ctx, top);
67 }
68
nmport_do_cleanup(struct nmport_d * d)69 void nmport_do_cleanup(struct nmport_d *d)
70 {
71 while (d->clist != NULL) {
72 nmport_pop_cleanup(d);
73 }
74 }
75
76 static struct nmport_d *
nmport_new_with_ctx(struct nmctx * ctx)77 nmport_new_with_ctx(struct nmctx *ctx)
78 {
79 struct nmport_d *d;
80
81 /* allocate a descriptor */
82 d = nmctx_malloc(ctx, sizeof(*d));
83 if (d == NULL) {
84 nmctx_ferror(ctx, "cannot allocate nmport descriptor");
85 goto out;
86 }
87 memset(d, 0, sizeof(*d));
88
89 nmreq_header_init(&d->hdr, NETMAP_REQ_REGISTER, &d->reg);
90
91 d->ctx = ctx;
92 d->fd = -1;
93
94 out:
95 return d;
96 }
97
/* Allocate a new port descriptor bound to the default context. */
struct nmport_d *
nmport_new(void)
{
	return nmport_new_with_ctx(nmctx_get());
}
104
105
/* Release a port descriptor obtained from nmport_new()/nmport_prepare(). */
void
nmport_delete(struct nmport_d *d)
{
	nmctx_free(d->ctx, d);
}
111
/*
 * Cleanup action paired with nmport_extmem(): detach the extmem option
 * from the request header and free it.  No-op if no extmem is attached.
 */
void
nmport_extmem_cleanup(struct nmport_cleanup_d *c, struct nmport_d *d)
{
	(void)c;

	if (d->extmem == NULL)
		return;

	nmreq_remove_option(&d->hdr, &d->extmem->nro_opt);
	nmctx_free(d->ctx, d->extmem);
	d->extmem = NULL;
}
124
125
/*
 * Ask that the port use the caller-provided memory region of 'size' bytes
 * at 'base' as its netmap memory (NETMAP_REQ_OPT_EXTMEM).  Must be called
 * before nmport_register(); only one extmem region per port is allowed.
 * A cleanup entry is pushed so that undo/cleanup detaches the option.
 * Returns 0 on success, -1 on error with errno set.
 */
int
nmport_extmem(struct nmport_d *d, void *base, size_t size)
{
	struct nmctx *ctx = d->ctx;
	struct nmport_cleanup_d *clnup = NULL;

	if (d->register_done) {
		nmctx_ferror(ctx, "%s: cannot set extmem of an already registered port", d->hdr.nr_name);
		errno = EINVAL;
		return -1;
	}

	if (d->extmem != NULL) {
		nmctx_ferror(ctx, "%s: extmem already in use", d->hdr.nr_name);
		errno = EINVAL;
		return -1;
	}

	/* allocate the cleanup entry first, so that nothing needs to be
	 * undone if either allocation fails
	 */
	clnup = (struct nmport_cleanup_d *)nmctx_malloc(ctx, sizeof(*clnup));
	if (clnup == NULL) {
		nmctx_ferror(ctx, "failed to allocate cleanup descriptor");
		errno = ENOMEM;
		return -1;
	}

	d->extmem = nmctx_malloc(ctx, sizeof(*d->extmem));
	if (d->extmem == NULL) {
		nmctx_ferror(ctx, "%s: cannot allocate extmem option", d->hdr.nr_name);
		nmctx_free(ctx, clnup);
		errno = ENOMEM;
		return -1;
	}
	memset(d->extmem, 0, sizeof(*d->extmem));
	d->extmem->nro_usrptr = (uintptr_t)base;
	d->extmem->nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
	d->extmem->nro_info.nr_memsize = size;
	nmreq_push_option(&d->hdr, &d->extmem->nro_opt);

	clnup->cleanup = nmport_extmem_cleanup;
	nmport_push_cleanup(d, clnup);

	return 0;
}
169
/* Cleanup entry for nmport_extmem_from_file(): records the mapping to undo. */
struct nmport_extmem_from_file_cleanup_d {
	struct nmport_cleanup_d up;	/* base cleanup entry (must be first) */
	void *p;			/* start of the mmap()ed region */
	size_t size;			/* length of the mapping */
};
175
nmport_extmem_from_file_cleanup(struct nmport_cleanup_d * c,struct nmport_d * d)176 void nmport_extmem_from_file_cleanup(struct nmport_cleanup_d *c,
177 struct nmport_d *d)
178 {
179 (void)d;
180 struct nmport_extmem_from_file_cleanup_d *cc =
181 (struct nmport_extmem_from_file_cleanup_d *)c;
182
183 munmap(cc->p, cc->size);
184 }
185
186 int
nmport_extmem_from_file(struct nmport_d * d,const char * fname)187 nmport_extmem_from_file(struct nmport_d *d, const char *fname)
188 {
189 struct nmctx *ctx = d->ctx;
190 int fd = -1;
191 off_t mapsize;
192 void *p;
193 struct nmport_extmem_from_file_cleanup_d *clnup = NULL;
194
195 clnup = nmctx_malloc(ctx, sizeof(*clnup));
196 if (clnup == NULL) {
197 nmctx_ferror(ctx, "cannot allocate cleanup descriptor");
198 errno = ENOMEM;
199 goto fail;
200 }
201
202 fd = open(fname, O_RDWR);
203 if (fd < 0) {
204 nmctx_ferror(ctx, "cannot open '%s': %s", fname, strerror(errno));
205 goto fail;
206 }
207 mapsize = lseek(fd, 0, SEEK_END);
208 if (mapsize < 0) {
209 nmctx_ferror(ctx, "failed to obtain filesize of '%s': %s", fname, strerror(errno));
210 goto fail;
211 }
212 p = mmap(0, mapsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
213 if (p == MAP_FAILED) {
214 nmctx_ferror(ctx, "cannot mmap '%s': %s", fname, strerror(errno));
215 goto fail;
216 }
217 close(fd);
218
219 clnup->p = p;
220 clnup->size = mapsize;
221 clnup->up.cleanup = nmport_extmem_from_file_cleanup;
222 nmport_push_cleanup(d, &clnup->up);
223
224 if (nmport_extmem(d, p, mapsize) < 0)
225 goto fail;
226
227 return 0;
228
229 fail:
230 if (fd >= 0)
231 close(fd);
232 if (clnup != NULL) {
233 if (clnup->p != MAP_FAILED)
234 nmport_pop_cleanup(d);
235 else
236 nmctx_free(ctx, clnup);
237 }
238 return -1;
239 }
240
241 struct nmreq_pools_info*
nmport_extmem_getinfo(struct nmport_d * d)242 nmport_extmem_getinfo(struct nmport_d *d)
243 {
244 if (d->extmem == NULL)
245 return NULL;
246 return &d->extmem->nro_info;
247 }
248
/* Cleanup entry for nmport_offset(): records the option to detach/free. */
struct nmport_offset_cleanup_d {
	struct nmport_cleanup_d up;	/* base cleanup entry (must be first) */
	struct nmreq_opt_offsets *opt;	/* the pushed offsets option */
};
253
/* Cleanup action paired with nmport_offset(): remove the offsets option
 * from the request header and free it.
 */
static void
nmport_offset_cleanup(struct nmport_cleanup_d *c,
		struct nmport_d *d)
{
	struct nmport_offset_cleanup_d *cc =
		(struct nmport_offset_cleanup_d *)c;

	nmreq_remove_option(&d->hdr, &cc->opt->nro_opt);
	nmctx_free(d->ctx, cc->opt);
}
264
/*
 * Push a NETMAP_REQ_OPT_OFFSETS option on the port's request, asking for
 * per-slot buffer offsets in [initial, maxoff] using 'bits' bits of the
 * ptr field and at least 'mingap' bytes between payloads.  A cleanup
 * entry is pushed so that undo/cleanup detaches the option.
 * Returns 0 on success, -1 on error with errno set.
 */
int
nmport_offset(struct nmport_d *d, uint64_t initial,
		uint64_t maxoff, uint64_t bits, uint64_t mingap)
{
	struct nmctx *ctx = d->ctx;
	struct nmreq_opt_offsets *opt;
	struct nmport_offset_cleanup_d *clnup = NULL;

	/* allocate the cleanup entry first, so that nothing needs to be
	 * undone if either allocation fails
	 */
	clnup = nmctx_malloc(ctx, sizeof(*clnup));
	if (clnup == NULL) {
		nmctx_ferror(ctx, "cannot allocate cleanup descriptor");
		errno = ENOMEM;
		return -1;
	}

	opt = nmctx_malloc(ctx, sizeof(*opt));
	if (opt == NULL) {
		nmctx_ferror(ctx, "%s: cannot allocate offset option", d->hdr.nr_name);
		nmctx_free(ctx, clnup);
		errno = ENOMEM;
		return -1;
	}
	memset(opt, 0, sizeof(*opt));
	opt->nro_opt.nro_reqtype = NETMAP_REQ_OPT_OFFSETS;
	opt->nro_offset_bits = bits;
	opt->nro_initial_offset = initial;
	opt->nro_max_offset = maxoff;
	opt->nro_min_gap = mingap;
	nmreq_push_option(&d->hdr, &opt->nro_opt);

	clnup->up.cleanup = nmport_offset_cleanup;
	clnup->opt = opt;
	nmport_push_cleanup(d, &clnup->up);

	return 0;
}
301
/* head of the list of options */
static struct nmreq_opt_parser *nmport_opt_parsers;

/* Name of the generated parser function / descriptor for option 'o'. */
#define NPOPT_PARSER(o)		nmport_opt_##o##_parser
#define NPOPT_DESC(o)		nmport_opt_##o##_desc
#define NPOPT_NRKEYS(o)		(NPOPT_DESC(o).nr_keys)
/* Declare option 'o' with flags 'f': forward-declare its parser, define
 * its descriptor, and link the descriptor into nmport_opt_parsers at
 * program startup via a constructor function.
 */
#define NPOPT_DECL(o, f)	\
	static int NPOPT_PARSER(o)(struct nmreq_parse_ctx *); \
	static struct nmreq_opt_parser NPOPT_DESC(o) = { \
		.prefix = #o, \
		.parse = NPOPT_PARSER(o), \
		.flags = (f), \
		.default_key = -1, \
		.nr_keys = 0, \
		.next = NULL, \
	}; \
	static void __attribute__((constructor)) \
	nmport_opt_##o##_ctor(void) \
	{ \
		NPOPT_DESC(o).next = nmport_opt_parsers; \
		nmport_opt_parsers = &NPOPT_DESC(o); \
	}
/* Static description of one "key=value" sub-option of an option parser. */
struct nmport_key_desc {
	struct nmreq_opt_parser *option;	/* owning option descriptor */
	const char *key;			/* key name as typed by the user */
	unsigned int flags;			/* NMREQ_OPTK_* flags */
	int id;					/* slot assigned at registration */
};
/* Register key 'k' into its owning option's key table, assigning it the
 * next free slot.  Called from the per-key constructors below.
 */
static void
nmport_opt_key_ctor(struct nmport_key_desc *k)
{
	struct nmreq_opt_parser *o = k->option;
	struct nmreq_opt_key *ok;

	k->id = o->nr_keys;
	ok = &o->keys[k->id];
	ok->key = k->key;
	ok->id = k->id;
	ok->flags = k->flags;
	o->nr_keys++;
	/* remember which key a bare value (no "key=") refers to */
	if (ok->flags & NMREQ_OPTK_DEFAULT)
		o->default_key = ok->id;
}
/* Name / registered id of the descriptor of key 'k' of option 'o'. */
#define NPKEY_DESC(o, k)	nmport_opt_##o##_key_##k##_desc
#define NPKEY_ID(o, k)		(NPKEY_DESC(o, k).id)
/* Declare key 'k' of option 'o' with flags 'f': define its descriptor and
 * register it via nmport_opt_key_ctor() at program startup.
 */
#define NPKEY_DECL(o, k, f)	\
	static struct nmport_key_desc NPKEY_DESC(o, k) = { \
		.option = &NPOPT_DESC(o), \
		.key = #k, \
		.flags = (f), \
		.id = -1, \
	}; \
	static void __attribute__((constructor)) \
	nmport_opt_##o##_key_##k##_ctor(void) \
	{ \
		nmport_opt_key_ctor(&NPKEY_DESC(o, k)); \
	}
/* Parsed value of a key in parse context 'p' (NULL if not given). */
#define nmport_key(p, o, k)	((p)->keys[NPKEY_ID(o, k)])
/* Parsed value of the option's default key. */
#define nmport_defkey(p, o)	((p)->keys[NPOPT_DESC(o).default_key])
361
/* The options and keys understood by the nmport parsers below. */
NPOPT_DECL(share, 0)
	NPKEY_DECL(share, port, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
NPOPT_DECL(extmem, 0)
	NPKEY_DECL(extmem, file, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
	NPKEY_DECL(extmem, if_num, 0)
	NPKEY_DECL(extmem, if_size, 0)
	NPKEY_DECL(extmem, ring_num, 0)
	NPKEY_DECL(extmem, ring_size, 0)
	NPKEY_DECL(extmem, buf_num, 0)
	NPKEY_DECL(extmem, buf_size, 0)
NPOPT_DECL(conf, 0)
	NPKEY_DECL(conf, rings, 0)
	NPKEY_DECL(conf, host_rings, 0)
	NPKEY_DECL(conf, slots, 0)
	NPKEY_DECL(conf, tx_rings, 0)
	NPKEY_DECL(conf, rx_rings, 0)
	NPKEY_DECL(conf, host_tx_rings, 0)
	NPKEY_DECL(conf, host_rx_rings, 0)
	NPKEY_DECL(conf, tx_slots, 0)
	NPKEY_DECL(conf, rx_slots, 0)
/* the offset option is opt-in: it must be enabled via nmport_enable_option() */
NPOPT_DECL(offset, NMREQ_OPTF_DISABLED)
	NPKEY_DECL(offset, initial, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
	NPKEY_DECL(offset, bits, 0)
385
386
/* Parser for "share=port": use the same memory region as 'port' by
 * copying its mem_id into the register request.
 */
static int
NPOPT_PARSER(share)(struct nmreq_parse_ctx *p)
{
	struct nmctx *ctx = p->ctx;
	struct nmport_d *d = p->token;
	int32_t mem_id;
	const char *v = nmport_defkey(p, share);

	mem_id = nmreq_get_mem_id(&v, ctx);
	if (mem_id < 0)
		return -1;
	/* refuse to override a mem_id that was already set differently */
	if (d->reg.nr_mem_id && d->reg.nr_mem_id != mem_id) {
		nmctx_ferror(ctx, "cannot set mem_id to %"PRId32", already set to %"PRIu16"",
				mem_id, d->reg.nr_mem_id);
		errno = EINVAL;
		return -1;
	}
	d->reg.nr_mem_id = mem_id;
	return 0;
}
407
/* Parser for "extmem=file[,key=value...]": map 'file' as external netmap
 * memory and optionally override the pool parameters with the given keys.
 */
static int
NPOPT_PARSER(extmem)(struct nmreq_parse_ctx *p)
{
	struct nmport_d *d;
	struct nmreq_pools_info *pi;
	int i;

	d = p->token;

	if (nmport_extmem_from_file(d, nmport_key(p, extmem, file)) < 0)
		return -1;

	pi = &d->extmem->nro_info;

	/* keys that were not given are NULL and leave the defaults alone */
	for (i = 0; i < NPOPT_NRKEYS(extmem); i++) {
		const char *k = p->keys[i];
		uint32_t v;

		if (k == NULL)
			continue;

		v = atoi(k);
		if (i == NPKEY_ID(extmem, if_num)) {
			pi->nr_if_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, if_size)) {
			pi->nr_if_pool_objsize = v;
		} else if (i == NPKEY_ID(extmem, ring_num)) {
			pi->nr_ring_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, ring_size)) {
			pi->nr_ring_pool_objsize = v;
		} else if (i == NPKEY_ID(extmem, buf_num)) {
			pi->nr_buf_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, buf_size)) {
			pi->nr_buf_pool_objsize = v;
		}
	}
	return 0;
}
446
/* Parser for "conf=...": fill the ring/slot counts of the register
 * request.  'rings', 'host_rings' and 'slots' set both the tx and rx
 * values; the direction-specific keys are applied afterwards and
 * therefore take precedence.
 */
static int
NPOPT_PARSER(conf)(struct nmreq_parse_ctx *p)
{
	struct nmport_d *d;

	d = p->token;

	if (nmport_key(p, conf, rings) != NULL) {
		uint16_t nr_rings = atoi(nmport_key(p, conf, rings));
		d->reg.nr_tx_rings = nr_rings;
		d->reg.nr_rx_rings = nr_rings;
	}
	if (nmport_key(p, conf, host_rings) != NULL) {
		uint16_t nr_rings = atoi(nmport_key(p, conf, host_rings));
		d->reg.nr_host_tx_rings = nr_rings;
		d->reg.nr_host_rx_rings = nr_rings;
	}
	if (nmport_key(p, conf, slots) != NULL) {
		uint32_t nr_slots = atoi(nmport_key(p, conf, slots));
		d->reg.nr_tx_slots = nr_slots;
		d->reg.nr_rx_slots = nr_slots;
	}
	if (nmport_key(p, conf, tx_rings) != NULL) {
		d->reg.nr_tx_rings = atoi(nmport_key(p, conf, tx_rings));
	}
	if (nmport_key(p, conf, rx_rings) != NULL) {
		d->reg.nr_rx_rings = atoi(nmport_key(p, conf, rx_rings));
	}
	if (nmport_key(p, conf, host_tx_rings) != NULL) {
		d->reg.nr_host_tx_rings = atoi(nmport_key(p, conf, host_tx_rings));
	}
	if (nmport_key(p, conf, host_rx_rings) != NULL) {
		d->reg.nr_host_rx_rings = atoi(nmport_key(p, conf, host_rx_rings));
	}
	if (nmport_key(p, conf, tx_slots) != NULL) {
		d->reg.nr_tx_slots = atoi(nmport_key(p, conf, tx_slots));
	}
	if (nmport_key(p, conf, rx_slots) != NULL) {
		d->reg.nr_rx_slots = atoi(nmport_key(p, conf, rx_slots));
	}
	return 0;
}
489
/* Parser for "offset=initial[,bits=n]": request per-slot offsets. */
static int
NPOPT_PARSER(offset)(struct nmreq_parse_ctx *p)
{
	struct nmport_d *d;
	uint64_t initial, bits;

	d = p->token;

	initial = atoi(nmport_key(p, offset, initial));
	bits = 0;
	if (nmport_key(p, offset, bits) != NULL)
		bits = atoi(nmport_key(p, offset, bits));

	/* the max offset is deliberately set equal to the initial offset
	 * and no minimum gap is requested
	 */
	return nmport_offset(d, initial, initial, bits, 0);
}
505
506
507 void
nmport_disable_option(const char * opt)508 nmport_disable_option(const char *opt)
509 {
510 struct nmreq_opt_parser *p;
511
512 for (p = nmport_opt_parsers; p != NULL; p = p->next) {
513 if (!strcmp(p->prefix, opt)) {
514 p->flags |= NMREQ_OPTF_DISABLED;
515 }
516 }
517 }
518
519 int
nmport_enable_option(const char * opt)520 nmport_enable_option(const char *opt)
521 {
522 struct nmreq_opt_parser *p;
523
524 for (p = nmport_opt_parsers; p != NULL; p = p->next) {
525 if (!strcmp(p->prefix, opt)) {
526 p->flags &= ~NMREQ_OPTF_DISABLED;
527 return 0;
528 }
529 }
530 errno = EOPNOTSUPP;
531 return -1;
532 }
533
534
535 int
nmport_parse(struct nmport_d * d,const char * ifname)536 nmport_parse(struct nmport_d *d, const char *ifname)
537 {
538 const char *scan = ifname;
539
540 if (nmreq_header_decode(&scan, &d->hdr, d->ctx) < 0) {
541 goto err;
542 }
543
544 /* parse the register request */
545 if (nmreq_register_decode(&scan, &d->reg, d->ctx) < 0) {
546 goto err;
547 }
548
549 /* parse the options, if any */
550 if (nmreq_options_decode(scan, nmport_opt_parsers, d, d->ctx) < 0) {
551 goto err;
552 }
553 return 0;
554
555 err:
556 nmport_undo_parse(d);
557 return -1;
558 }
559
/* Undo nmport_parse(): run all pending cleanups (detaching any options)
 * and reset the register body and request header.
 */
void
nmport_undo_parse(struct nmport_d *d)
{
	nmport_do_cleanup(d);
	memset(&d->reg, 0, sizeof(d->reg));
	memset(&d->hdr, 0, sizeof(d->hdr));
}
567
568 struct nmport_d *
nmport_prepare(const char * ifname)569 nmport_prepare(const char *ifname)
570 {
571 struct nmport_d *d;
572
573 /* allocate a descriptor */
574 d = nmport_new();
575 if (d == NULL)
576 goto err;
577
578 /* parse the header */
579 if (nmport_parse(d, ifname) < 0)
580 goto err;
581
582 return d;
583
584 err:
585 nmport_undo_prepare(d);
586 return NULL;
587 }
588
589 void
nmport_undo_prepare(struct nmport_d * d)590 nmport_undo_prepare(struct nmport_d *d)
591 {
592 if (d == NULL)
593 return;
594 nmport_undo_parse(d);
595 nmport_delete(d);
596 }
597
/*
 * Open /dev/netmap and issue the register request prepared in d->hdr.
 * On ioctl failure, per-option errors (if any) are reported instead of
 * the generic errno.  Returns 0 on success, -1 on failure (with the
 * partial registration undone).
 */
int
nmport_register(struct nmport_d *d)
{
	struct nmctx *ctx = d->ctx;

	if (d->register_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "%s: already registered", d->hdr.nr_name);
		return -1;
	}

	d->fd = open("/dev/netmap", O_RDWR);
	if (d->fd < 0) {
		nmctx_ferror(ctx, "/dev/netmap: %s", strerror(errno));
		goto err;
	}

	if (ioctl(d->fd, NIOCCTRL, &d->hdr) < 0) {
		struct nmreq_option *o;
		int option_errors = 0;

		/* report each option the kernel rejected */
		nmreq_foreach_option(&d->hdr, o) {
			if (o->nro_status) {
				nmctx_ferror(ctx, "%s: option %s: %s",
						d->hdr.nr_name,
						nmreq_option_name(o->nro_reqtype),
						strerror(o->nro_status));
				option_errors++;
			}

		}
		/* fall back to the generic error if no option failed */
		if (!option_errors)
			nmctx_ferror(ctx, "%s: %s", d->hdr.nr_name, strerror(errno));
		goto err;
	}

	d->register_done = 1;

	return 0;

err:
	nmport_undo_register(d);
	return -1;
}
642
643 void
nmport_undo_register(struct nmport_d * d)644 nmport_undo_register(struct nmport_d *d)
645 {
646 if (d->fd >= 0)
647 close(d->fd);
648 d->fd = -1;
649 d->register_done = 0;
650 }
651
652 /* lookup the mem_id in the mem-list: do a new mmap() if
653 * not found, reuse existing otherwise
654 */
655 int
nmport_mmap(struct nmport_d * d)656 nmport_mmap(struct nmport_d *d)
657 {
658 struct nmctx *ctx = d->ctx;
659 struct nmem_d *m = NULL;
660 u_int num_tx, num_rx;
661 unsigned int i;
662
663 if (d->mmap_done) {
664 errno = EINVAL;
665 nmctx_ferror(ctx, "%s: already mapped", d->hdr.nr_name);
666 return -1;
667 }
668
669 if (!d->register_done) {
670 errno = EINVAL;
671 nmctx_ferror(ctx, "cannot map unregistered port");
672 return -1;
673 }
674
675 nmctx_lock(ctx);
676
677 for (m = ctx->mem_descs; m != NULL; m = m->next)
678 if (m->mem_id == d->reg.nr_mem_id)
679 break;
680
681 if (m == NULL) {
682 m = nmctx_malloc(ctx, sizeof(*m));
683 if (m == NULL) {
684 nmctx_ferror(ctx, "cannot allocate memory descriptor");
685 goto err;
686 }
687 memset(m, 0, sizeof(*m));
688 if (d->extmem != NULL) {
689 m->mem = (void *)((uintptr_t)d->extmem->nro_usrptr);
690 m->size = d->extmem->nro_info.nr_memsize;
691 m->is_extmem = 1;
692 } else {
693 m->mem = mmap(NULL, d->reg.nr_memsize, PROT_READ|PROT_WRITE,
694 MAP_SHARED, d->fd, 0);
695 if (m->mem == MAP_FAILED) {
696 nmctx_ferror(ctx, "mmap: %s", strerror(errno));
697 goto err;
698 }
699 m->size = d->reg.nr_memsize;
700 }
701 m->mem_id = d->reg.nr_mem_id;
702 m->next = ctx->mem_descs;
703 if (ctx->mem_descs != NULL)
704 ctx->mem_descs->prev = m;
705 ctx->mem_descs = m;
706 }
707 m->refcount++;
708
709 nmctx_unlock(ctx);
710
711 d->mem = m;
712
713 d->nifp = NETMAP_IF(m->mem, d->reg.nr_offset);
714
715 num_tx = d->reg.nr_tx_rings + d->nifp->ni_host_tx_rings;
716 for (i = 0; i < num_tx && !d->nifp->ring_ofs[i]; i++)
717 ;
718 d->cur_tx_ring = d->first_tx_ring = i;
719 for ( ; i < num_tx && d->nifp->ring_ofs[i]; i++)
720 ;
721 d->last_tx_ring = i - 1;
722
723 num_rx = d->reg.nr_rx_rings + d->nifp->ni_host_rx_rings;
724 for (i = 0; i < num_rx && !d->nifp->ring_ofs[i + num_tx]; i++)
725 ;
726 d->cur_rx_ring = d->first_rx_ring = i;
727 for ( ; i < num_rx && d->nifp->ring_ofs[i + num_tx]; i++)
728 ;
729 d->last_rx_ring = i - 1;
730
731 d->mmap_done = 1;
732
733 return 0;
734
735 err:
736 nmctx_unlock(ctx);
737 nmport_undo_mmap(d);
738 return -1;
739 }
740
/*
 * Undo nmport_mmap(): drop the port's reference on its memory descriptor,
 * unmapping and unlinking the descriptor when the last reference goes
 * away (extmem regions are not unmapped here: they are owned by their
 * creator).  Safe to call on a port that was never mapped.
 */
void
nmport_undo_mmap(struct nmport_d *d)
{
	struct nmem_d *m;
	struct nmctx *ctx = d->ctx;

	m = d->mem;
	if (m == NULL)
		return;
	nmctx_lock(ctx);
	m->refcount--;
	if (m->refcount <= 0) {
		if (!m->is_extmem && m->mem != MAP_FAILED)
			munmap(m->mem, m->size);
		/* extract from the list and free */
		if (m->next != NULL)
			m->next->prev = m->prev;
		if (m->prev != NULL)
			m->prev->next = m->next;
		else
			ctx->mem_descs = m->next;
		nmctx_free(ctx, m);
		d->mem = NULL;
	}
	nmctx_unlock(ctx);
	/* reset the port to the registered-but-unmapped state */
	d->mmap_done = 0;
	d->mem = NULL;
	d->nifp = NULL;
	d->first_tx_ring = 0;
	d->last_tx_ring = 0;
	d->first_rx_ring = 0;
	d->last_rx_ring = 0;
	d->cur_tx_ring = 0;
	d->cur_rx_ring = 0;
}
776
/* Register a prepared port and map its memory.  On failure both steps
 * are undone.  Returns 0 on success, -1 on failure.
 */
int
nmport_open_desc(struct nmport_d *d)
{
	if (nmport_register(d) == 0 && nmport_mmap(d) == 0)
		return 0;

	nmport_undo_open_desc(d);
	return -1;
}
791
/* Undo nmport_open_desc(), reversing the order of the two steps. */
void
nmport_undo_open_desc(struct nmport_d *d)
{
	nmport_undo_mmap(d);
	nmport_undo_register(d);
}
798
799
800 struct nmport_d *
nmport_open(const char * ifname)801 nmport_open(const char *ifname)
802 {
803 struct nmport_d *d;
804
805 /* prepare the descriptor */
806 d = nmport_prepare(ifname);
807 if (d == NULL)
808 goto err;
809
810 /* open netmap and register */
811 if (nmport_open_desc(d) < 0)
812 goto err;
813
814 return d;
815
816 err:
817 nmport_close(d);
818 return NULL;
819 }
820
821 void
nmport_close(struct nmport_d * d)822 nmport_close(struct nmport_d *d)
823 {
824 if (d == NULL)
825 return;
826 nmport_undo_open_desc(d);
827 nmport_undo_prepare(d);
828 }
829
/*
 * Create a new descriptor that targets the same port/memory as 'd' but is
 * in the un-registered, unmapped state, so that it can be registered
 * independently.  Options are not cloned; a port using extmem can only be
 * cloned after it has been registered.  Returns NULL on error.
 */
struct nmport_d *
nmport_clone(struct nmport_d *d)
{
	struct nmport_d *c;
	struct nmctx *ctx;

	ctx = d->ctx;

	if (d->extmem != NULL && !d->register_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "cannot clone unregistered port that is using extmem");
		return NULL;
	}

	c = nmport_new_with_ctx(ctx);
	if (c == NULL)
		return NULL;
	/* copy the output of parse */
	c->hdr = d->hdr;
	/* redirect the pointer to the body */
	c->hdr.nr_body = (uintptr_t)&c->reg;
	/* options are not cloned */
	c->hdr.nr_options = 0;
	c->reg = d->reg; /* this also copies the mem_id */
	/* put the new port in an un-registered, unmapped state */
	c->fd = -1;
	c->nifp = NULL;
	c->register_done = 0;
	c->mem = NULL;
	c->extmem = NULL;
	c->mmap_done = 0;
	c->first_tx_ring = 0;
	c->last_tx_ring = 0;
	c->first_rx_ring = 0;
	c->last_rx_ring = 0;
	c->cur_tx_ring = 0;
	c->cur_rx_ring = 0;

	return c;
}
870
/*
 * Copy a packet of 'size' bytes from 'buf' into the first tx ring of 'd'
 * that has enough free slots, splitting it over multiple slots with
 * NS_MOREFRAG when it exceeds the buffer size.  Ring scanning starts at
 * the current tx ring and wraps around.  Returns 'size' on success, 0 if
 * no ring could accommodate the packet.
 */
int
nmport_inject(struct nmport_d *d, const void *buf, size_t size)
{
	u_int c, n = d->last_tx_ring - d->first_tx_ring + 1,
		ri = d->cur_tx_ring;

	for (c = 0; c < n ; c++, ri++) {
		/* compute current ring to use */
		struct netmap_ring *ring;
		uint32_t i, j, idx;
		size_t rem;

		if (ri > d->last_tx_ring)
			ri = d->first_tx_ring;
		ring = NETMAP_TXRING(d->nifp, ri);
		/* first pass: advance 'j' over enough free slots to hold the
		 * whole packet; 'rem' ends up as the length of the last
		 * fragment
		 */
		rem = size;
		j = ring->cur;
		while (rem > ring->nr_buf_size && j != ring->tail) {
			rem -= ring->nr_buf_size;
			j = nm_ring_next(ring, j);
		}
		/* not enough room in this ring: try the next one */
		if (j == ring->tail && rem > 0)
			continue;
		/* second pass: fill all full-size fragments ... */
		i = ring->cur;
		while (i != j) {
			idx = ring->slot[i].buf_idx;
			ring->slot[i].len = ring->nr_buf_size;
			ring->slot[i].flags = NS_MOREFRAG;
			nm_pkt_copy(buf, NETMAP_BUF(ring, idx), ring->nr_buf_size);
			i = nm_ring_next(ring, i);
			buf = (char *)buf + ring->nr_buf_size;
		}
		/* ... then the final (possibly only) fragment */
		idx = ring->slot[i].buf_idx;
		ring->slot[i].len = rem;
		ring->slot[i].flags = 0;
		nm_pkt_copy(buf, NETMAP_BUF(ring, idx), rem);
		ring->head = ring->cur = nm_ring_next(ring, i);
		/* remember the ring we used, to resume here next time */
		d->cur_tx_ring = ri;
		return size;
	}
	return 0; /* fail */
}
913