Lines Matching +full:sync +full:- +full:update +full:- +full:mask

2  * Copyright (C) 2016-2018 Vincenzo Maffione
43 pause_sbt("sync-kloop-sleep", SBT_1US * sleep_us, SBT_1US * 1, C_ABSOLUTE);
56 /* Support for eventfd-based notifications. */
67 /* Issue a first store-store barrier to make sure that updates of in sync_kloop_kernel_write()
68  * ktoa->hwcur and ktoa->hwtail do not overtake the preceding writes to the netmap ring. */ in sync_kloop_kernel_write()
83 * wmb() <-------------> rmb() in sync_kloop_kernel_write()
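A minimal user-space sketch of the publish pattern behind lines 67-83: ring-slot writes must become visible before the new hwcur, and a reader that loads hwtail first (with a matching load barrier) can never see a hwcur staler than hwtail. The struct layout and the GCC fences standing in for netmap's nm_stst_barrier()/CSB_WRITE() are assumptions.

    #include <stdint.h>

    struct ktoa_sketch {
        uint32_t hwcur;   /* kernel-to-application: next slot to sync */
        uint32_t hwtail;  /* kernel-to-application: first busy slot */
    };

    static void
    ktoa_write_sketch(struct ktoa_sketch *ptr, uint32_t hwcur, uint32_t hwtail)
    {
        /* Store-store barrier: earlier writes to the ring slots must not
         * be reordered after the hwcur/hwtail publication below. */
        __atomic_thread_fence(__ATOMIC_RELEASE);
        ptr->hwcur = hwcur;
        /* Second barrier: pairs with a reader that loads hwtail before
         * hwcur, so hwcur is always at least as fresh as hwtail. */
        __atomic_thread_fence(__ATOMIC_RELEASE);
        ptr->hwtail = hwtail;
    }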
99 * We place a memory barrier to make sure that the update of head never in sync_kloop_kernel_read()
100 * overtakes the update of cur. in sync_kloop_kernel_read()
103 CSB_READ(ptr, head, shadow_ring->head); in sync_kloop_kernel_read()
105 CSB_READ(ptr, cur, shadow_ring->cur); in sync_kloop_kernel_read()
106 CSB_READ(ptr, sync_flags, shadow_ring->flags); in sync_kloop_kernel_read()
108 /* Make sure that loads from atok->head and atok->cur are not delayed in sync_kloop_kernel_read()
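The read side at lines 99-108, as a hedged user-space sketch: head is loaded before cur with a load-load barrier in between, and a final barrier keeps these control loads ahead of any later loads from the ring slots. Field names follow the listing; the fences are assumptions in place of netmap's own barrier macros.

    #include <stdint.h>

    struct atok_sketch {
        uint32_t head;        /* application-to-kernel: producer head */
        uint32_t cur;         /* application-to-kernel: wakeup point */
        uint32_t sync_flags;  /* flags for the nm_sync() call */
    };

    static void
    atok_read_sketch(const struct atok_sketch *ptr, uint32_t *head,
                     uint32_t *cur, uint32_t *flags)
    {
        *head = ptr->head;
        /* Load-load barrier: the head value must never be fresher
         * than the cur value read below. */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        *cur = ptr->cur;
        *flags = ptr->sync_flags;
        /* Keep these loads ahead of later loads from the ring slots. */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    }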
113 /* Enable or disable application --> kernel kicks. */
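The kick-enable helper announced at line 113 plausibly reduces to a single CSB flag store; this sketch assumes a kern_need_kick field, consistent with the NM_ACCESS_ONCE(ktoa->kern_need_kick) reads at lines 989 and 1084.

    #include <stdint.h>

    struct ktoa_kick_sketch {
        uint32_t kern_need_kick;  /* does the kernel loop want kicks? */
    };

    static void
    ktoa_kick_enable_sketch(struct ktoa_kick_sketch *ktoa, uint32_t val)
    {
        /* 1: the kloop is about to sleep and needs a kick on new work;
         * 0: the kloop is busy polling, so kicks are pure overhead. */
        ktoa->kern_need_kick = val;
    }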
138 title, kring->name, kring->nr_hwcur, kring->rhead, in sync_kloop_kring_dump()
139 kring->rcur, kring->rtail, kring->nr_hwtail); in sync_kloop_kring_dump()
161 struct netmap_kring *kring = a->kring; in netmap_sync_kloop_tx_ring()
162 struct nm_csb_atok *csb_atok = a->csb_atok; in netmap_sync_kloop_tx_ring()
163 struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa; in netmap_sync_kloop_tx_ring()
175 num_slots = kring->nkr_num_slots; in netmap_sync_kloop_tx_ring()
177 /* Disable application --> kernel notifications. */ in netmap_sync_kloop_tx_ring()
178 if (!a->direct) { in netmap_sync_kloop_tx_ring()
185 batch = shadow_ring.head - kring->nr_hwcur; in netmap_sync_kloop_tx_ring()
193 uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots); in netmap_sync_kloop_tx_ring()
196 head_lim -= num_slots; in netmap_sync_kloop_tx_ring()
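Lines 185-196 cap the per-iteration TX batch and handle ring wraparound. A self-contained worked example of that arithmetic; PTN_TX_BATCH_LIM() is assumed to be a quarter of the ring here, which may differ from the real macro.

    #include <stdint.h>
    #include <stdio.h>

    #define PTN_TX_BATCH_LIM(n)  ((n) / 4)   /* assumed definition */

    static uint32_t
    clamp_head_sketch(uint32_t hwcur, uint32_t head, uint32_t num_slots)
    {
        /* The raw difference can be "negative" modulo the ring size. */
        int32_t batch = (int32_t)(head - hwcur);
        if (batch < 0)
            batch += num_slots;
        if ((uint32_t)batch > PTN_TX_BATCH_LIM(num_slots)) {
            /* Cap the work done by a single nm_sync() call. */
            uint32_t head_lim = hwcur + PTN_TX_BATCH_LIM(num_slots);
            if (head_lim >= num_slots)
                head_lim -= num_slots;       /* ring wraparound */
            head = head_lim;
        }
        return head;
    }

    int main(void)
    {
        /* 1024-slot ring, hwcur at 1000, producer posted 524 slots:
         * the clamped head wraps to 1000 + 256 - 1024 = 232. */
        printf("%u\n", (unsigned)clamp_head_sketch(1000, 500, 1024));
        return 0;
    }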
209 shadow_ring.tail = kring->rtail; in netmap_sync_kloop_tx_ring()
213 if (!a->busy_wait) { in netmap_sync_kloop_tx_ring()
223 if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) { in netmap_sync_kloop_tx_ring()
224 if (!a->busy_wait) { in netmap_sync_kloop_tx_ring()
225 /* Re-enable notifications. */ in netmap_sync_kloop_tx_ring()
234 * Copy kernel hwcur and hwtail into the CSB for the application sync(), and in netmap_sync_kloop_tx_ring()
237 sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur, in netmap_sync_kloop_tx_ring()
238 kring->nr_hwtail); in netmap_sync_kloop_tx_ring()
239 if (kring->rtail != kring->nr_hwtail) { in netmap_sync_kloop_tx_ring()
241 kring->rtail = kring->nr_hwtail; in netmap_sync_kloop_tx_ring()
253 if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) { in netmap_sync_kloop_tx_ring()
254 /* We could disable kernel --> application kicks here, in netmap_sync_kloop_tx_ring()
256 eventfd_signal(a->irq_ctx, 1); in netmap_sync_kloop_tx_ring()
263 if (shadow_ring.head == kring->rhead) { in netmap_sync_kloop_tx_ring()
264 if (a->busy_wait) { in netmap_sync_kloop_tx_ring()
272 /* Re-enable notifications. */ in netmap_sync_kloop_tx_ring()
274 /* Double check, with store-load memory barrier. */ in netmap_sync_kloop_tx_ring()
277 if (shadow_ring.head != kring->rhead) { in netmap_sync_kloop_tx_ring()
297 if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) { in netmap_sync_kloop_tx_ring()
298 eventfd_signal(a->irq_ctx, 1); in netmap_sync_kloop_tx_ring()
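Lines 263-277 are the classic lost-wakeup avoidance around sleeping: re-enable application->kernel kicks, issue a store-load barrier, then re-read the producer head. A minimal sketch of that handshake using C11 atomics (names illustrative; the real code uses csb_atok_kick_enable() and netmap's store-load barrier):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct tx_sync_sketch {
        _Atomic uint32_t head;        /* written by the application */
        _Atomic uint32_t need_kick;   /* read by the application */
    };

    static bool
    safe_to_sleep_sketch(struct tx_sync_sketch *r, uint32_t rhead)
    {
        if (atomic_load(&r->head) != rhead)
            return false;                    /* new work already posted */
        /* Re-enable notifications before blocking. */
        atomic_store(&r->need_kick, 1);
        /* Store-load barrier, then double check: a producer that sampled
         * need_kick == 0 just before our store must be caught here,
         * otherwise its (skipped) kick would be lost forever. */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&r->head) != rhead) {
            atomic_store(&r->need_kick, 0);  /* keep polling instead */
            return false;
        }
        return true;                         /* block on the eventfd */
    }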
309 return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head, in sync_kloop_norxslots()
310 kring->nkr_num_slots - 1)); in sync_kloop_norxslots()
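sync_kloop_norxslots() (lines 309-310) reports a full RX ring: hwtail has caught up to the slot just before the application's head. nm_prev() is a circular decrement, sketched below with a worked example.

    #include <stdint.h>

    /* Circular decrement on a ring with lim == num_slots - 1,
     * matching the nm_prev(g_head, nkr_num_slots - 1) call above. */
    static inline uint32_t
    nm_prev_sketch(uint32_t i, uint32_t lim)
    {
        return i == 0 ? lim : i - 1;
    }

    /* Example: with 1024 slots and g_head == 0, the ring is full
     * once hwtail == nm_prev_sketch(0, 1023) == 1023. */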
317 struct netmap_kring *kring = a->kring; in netmap_sync_kloop_rx_ring()
318 struct nm_csb_atok *csb_atok = a->csb_atok; in netmap_sync_kloop_rx_ring()
319 struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa; in netmap_sync_kloop_rx_ring()
331 num_slots = kring->nkr_num_slots; in netmap_sync_kloop_rx_ring()
334 num_slots = kring->nkr_num_slots; in netmap_sync_kloop_rx_ring()
337 if (!a->direct) { in netmap_sync_kloop_rx_ring()
347 shadow_ring.tail = kring->rtail; in netmap_sync_kloop_rx_ring()
351 if (!a->busy_wait) { in netmap_sync_kloop_rx_ring()
361 if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) { in netmap_sync_kloop_rx_ring()
362 if (!a->busy_wait) { in netmap_sync_kloop_rx_ring()
363 /* Re-enable notifications. */ in netmap_sync_kloop_rx_ring()
372 * Copy kernel hwcur and hwtail into the CSB for the application sync() in netmap_sync_kloop_rx_ring()
374 hwtail = NM_ACCESS_ONCE(kring->nr_hwtail); in netmap_sync_kloop_rx_ring()
375 sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur, hwtail); in netmap_sync_kloop_rx_ring()
376 if (kring->rtail != hwtail) { in netmap_sync_kloop_rx_ring()
377 kring->rtail = hwtail; in netmap_sync_kloop_rx_ring()
392 if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) { in netmap_sync_kloop_rx_ring()
393 /* We could disable kernel --> application kicks here, in netmap_sync_kloop_rx_ring()
395 eventfd_signal(a->irq_ctx, 1); in netmap_sync_kloop_rx_ring()
403 if (a->busy_wait) { in netmap_sync_kloop_rx_ring()
411 /* Re-enable notifications. */ in netmap_sync_kloop_rx_ring()
413 /* Double check, with store-load memory barrier. */ in netmap_sync_kloop_rx_ring()
425 hwtail = NM_ACCESS_ONCE(kring->nr_hwtail); in netmap_sync_kloop_rx_ring()
426 if (unlikely(hwtail == kring->rhead || in netmap_sync_kloop_rx_ring()
431 hwtail, kring->rhead, dry_cycles); in netmap_sync_kloop_rx_ring()
440 if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) { in netmap_sync_kloop_rx_ring()
441 eventfd_signal(a->irq_ctx, 1); in netmap_sync_kloop_rx_ring()
460 * in case of a custom wake-up function. */
485 struct sync_kloop_poll_entry *entry = poll_ctx->entries + in sync_kloop_poll_table_queue_proc()
486 poll_ctx->next_entry; in sync_kloop_poll_table_queue_proc()
488 BUG_ON(poll_ctx->next_entry >= poll_ctx->num_entries); in sync_kloop_poll_table_queue_proc()
489 entry->wqh = wqh; in sync_kloop_poll_table_queue_proc()
490 entry->filp = file; in sync_kloop_poll_table_queue_proc()
492 if (poll_ctx->next_wake_fun == NULL) { in sync_kloop_poll_table_queue_proc()
493 init_waitqueue_entry(&entry->wait, current); in sync_kloop_poll_table_queue_proc()
495 init_waitqueue_func_entry(&entry->wait, in sync_kloop_poll_table_queue_proc()
496 poll_ctx->next_wake_fun); in sync_kloop_poll_table_queue_proc()
498 add_wait_queue(wqh, &entry->wait); in sync_kloop_poll_table_queue_proc()
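Lines 485-498 are a custom poll-table queue proc: Linux calls it once for every wait queue that a polled file hands to poll_wait(), and the kloop uses it to attach either the default waker (the kloop thread itself) or a per-ring wake function in direct mode. A kernel-side reconstruction consistent with the listing (surrounding type definitions abbreviated):

    #include <linux/poll.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static void
    queue_proc_sketch(struct file *file, wait_queue_head_t *wqh,
                      poll_table *pt)
    {
        struct sync_kloop_poll_ctx *poll_ctx =
                container_of(pt, struct sync_kloop_poll_ctx, wait_table);
        struct sync_kloop_poll_entry *entry =
                poll_ctx->entries + poll_ctx->next_entry;

        entry->wqh = wqh;
        entry->filp = file;
        if (poll_ctx->next_wake_fun == NULL) {
            /* Default: wake up the kloop thread on events. */
            init_waitqueue_entry(&entry->wait, current);
        } else {
            /* Direct mode: run the ring handler from wake-up context. */
            init_waitqueue_func_entry(&entry->wait,
                                      poll_ctx->next_wake_fun);
        }
        add_wait_queue(wqh, &entry->wait);
    }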
508 netmap_sync_kloop_tx_ring(entry->args); in sync_kloop_tx_kick_wake_fun()
519 struct sync_kloop_poll_ctx *poll_ctx = entry->parent; in sync_kloop_tx_irq_wake_fun()
522 for (i = 0; i < poll_ctx->num_tx_rings; i++) { in sync_kloop_tx_irq_wake_fun()
523 struct eventfd_ctx *irq_ctx = poll_ctx->entries[i].irq_ctx; in sync_kloop_tx_irq_wake_fun()
540 netmap_sync_kloop_rx_ring(entry->args); in sync_kloop_rx_kick_wake_fun()
551 struct sync_kloop_poll_ctx *poll_ctx = entry->parent; in sync_kloop_rx_irq_wake_fun()
554 for (i = poll_ctx->num_tx_rings; i < poll_ctx->num_rings; i++) { in sync_kloop_rx_irq_wake_fun()
555 struct eventfd_ctx *irq_ctx = poll_ctx->entries[i].irq_ctx; in sync_kloop_rx_irq_wake_fun()
570 (struct nmreq_sync_kloop_start *)(uintptr_t)hdr->nr_body; in netmap_sync_kloop()
577 uint32_t sleep_us = req->sleep_us; in netmap_sync_kloop()
594 if (priv->np_nifp == NULL) { in netmap_sync_kloop()
599 na = priv->np_na; in netmap_sync_kloop()
606 if (!priv->np_csb_atok_base || !priv->np_csb_ktoa_base) { in netmap_sync_kloop()
608 nm_prerr("sync-kloop on %s requires " in netmap_sync_kloop()
609 "NETMAP_REQ_OPT_CSB option", na->name); in netmap_sync_kloop()
613 csb_atok_base = priv->np_csb_atok_base; in netmap_sync_kloop()
614 csb_ktoa_base = priv->np_csb_ktoa_base; in netmap_sync_kloop()
617 if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) { in netmap_sync_kloop()
620 priv->np_kloop_state |= NM_SYNC_KLOOP_RUNNING; in netmap_sync_kloop()
626 num_rx_rings = priv->np_qlast[NR_RX] - priv->np_qfirst[NR_RX]; in netmap_sync_kloop()
627 num_tx_rings = priv->np_qlast[NR_TX] - priv->np_qfirst[NR_TX]; in netmap_sync_kloop()
641 a->kring = NMR(na, NR_TX)[i + priv->np_qfirst[NR_TX]]; in netmap_sync_kloop()
642 a->csb_atok = csb_atok_base + i; in netmap_sync_kloop()
643 a->csb_ktoa = csb_ktoa_base + i; in netmap_sync_kloop()
644 a->busy_wait = busy_wait; in netmap_sync_kloop()
645 a->direct = direct_tx; in netmap_sync_kloop()
650 a->kring = NMR(na, NR_RX)[i + priv->np_qfirst[NR_RX]]; in netmap_sync_kloop()
651 a->csb_atok = csb_atok_base + num_tx_rings + i; in netmap_sync_kloop()
652 a->csb_ktoa = csb_ktoa_base + num_tx_rings + i; in netmap_sync_kloop()
653 a->busy_wait = busy_wait; in netmap_sync_kloop()
654 a->direct = direct_rx; in netmap_sync_kloop()
663 direct_tx = !!(mode_opt->mode & NM_OPT_SYNC_KLOOP_DIRECT_TX); in netmap_sync_kloop()
664 direct_rx = !!(mode_opt->mode & NM_OPT_SYNC_KLOOP_DIRECT_RX); in netmap_sync_kloop()
665 if (mode_opt->mode & ~(NM_OPT_SYNC_KLOOP_DIRECT_TX | in netmap_sync_kloop()
667 opt->nro_status = err = EINVAL; in netmap_sync_kloop()
670 opt->nro_status = 0; in netmap_sync_kloop()
674 if (opt->nro_size != sizeof(*eventfds_opt) + in netmap_sync_kloop()
675 sizeof(eventfds_opt->eventfds[0]) * num_rings) { in netmap_sync_kloop()
678 opt->nro_status = err = EINVAL; in netmap_sync_kloop()
683 opt->nro_status = 0; in netmap_sync_kloop()
689 if (eventfds_opt->eventfds[i].ioeventfd < 0) { in netmap_sync_kloop()
698 opt->nro_status = err = EINVAL; in netmap_sync_kloop()
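Lines 674-678 reject an eventfds option whose size does not match the ring count. A user-space worked example of that check, with an assumed layout (the real structure is struct nmreq_sync_kloop_eventfds in the netmap headers):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct eventfds_opt_sketch {
        uint64_t hdr[4];                                 /* header stand-in */
        struct { int32_t ioeventfd, irqfd; } eventfds[]; /* one per ring */
    };

    static int
    check_opt_size_sketch(size_t nro_size, unsigned num_rings)
    {
        size_t expect = sizeof(struct eventfds_opt_sketch) +
            sizeof(((struct eventfds_opt_sketch *)0)->eventfds[0]) * num_rings;
        return nro_size == expect ? 0 : EINVAL;
    }

    int main(void)
    {
        /* 4 rings: 32-byte header + 4 * 8 bytes = 64 bytes expected. */
        printf("%d %d\n", check_opt_size_sketch(64, 4),
               check_opt_size_sketch(60, 4));   /* -> 0 and EINVAL */
        return 0;
    }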
706 (num_rings + 2) * sizeof(poll_ctx->entries[0])); in netmap_sync_kloop()
707 init_poll_funcptr(&poll_ctx->wait_table, in netmap_sync_kloop()
709 poll_ctx->num_entries = 2 + num_rings; in netmap_sync_kloop()
710 poll_ctx->num_tx_rings = num_tx_rings; in netmap_sync_kloop()
711 poll_ctx->num_rings = num_rings; in netmap_sync_kloop()
712 poll_ctx->next_entry = 0; in netmap_sync_kloop()
713 poll_ctx->next_wake_fun = NULL; in netmap_sync_kloop()
715 if (direct_tx && (na->na_flags & NAF_BDG_MAYSLEEP)) { in netmap_sync_kloop()
717 * wake-up context, where it is not possible in netmap_sync_kloop()
720 na->na_flags &= ~NAF_BDG_MAYSLEEP; in netmap_sync_kloop()
725 poll_ctx->entries[i].args = args + i; in netmap_sync_kloop()
726 poll_ctx->entries[i].parent = poll_ctx; in netmap_sync_kloop()
731 for (i = 0; i < num_rings; i++, poll_ctx->next_entry++) { in netmap_sync_kloop()
734 unsigned long mask; in netmap_sync_kloop() local
737 if (eventfds_opt->eventfds[i].irqfd >= 0) { in netmap_sync_kloop()
739 eventfds_opt->eventfds[i].irqfd); in netmap_sync_kloop()
750 poll_ctx->entries[i].irq_filp = filp; in netmap_sync_kloop()
751 poll_ctx->entries[i].irq_ctx = irq; in netmap_sync_kloop()
752 poll_ctx->entries[i].args->busy_wait = busy_wait; in netmap_sync_kloop()
755 poll_ctx->entries[i].args->irq_ctx = in netmap_sync_kloop()
758 poll_ctx->entries[i].irq_ctx; in netmap_sync_kloop()
759 poll_ctx->entries[i].args->direct = in netmap_sync_kloop()
764 eventfds_opt->eventfds[i].ioeventfd); in netmap_sync_kloop()
774 poll_ctx->next_wake_fun = in netmap_sync_kloop()
778 poll_ctx->next_wake_fun = in netmap_sync_kloop()
781 poll_ctx->next_wake_fun = NULL; in netmap_sync_kloop()
783 mask = filp->f_op->poll(filp, in netmap_sync_kloop()
784 &poll_ctx->wait_table); in netmap_sync_kloop()
785 if (mask & POLLERR) { in netmap_sync_kloop()
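Lines 731-785 register each ring's ioeventfd with the poll context, selecting the wake function that the queue proc (lines 485-498) will attach. A kernel-side sketch of one registration; error unwinding and the irqfd leg are omitted, and eventfd_fget()/vfs_poll() stand in for the fget()/f_op->poll() calls visible in the listing:

    #include <linux/eventfd.h>
    #include <linux/poll.h>

    static int
    register_ioeventfd_sketch(struct sync_kloop_poll_ctx *poll_ctx,
                              int ioeventfd, wait_queue_func_t wake_fun)
    {
        struct file *filp;
        __poll_t mask;

        filp = eventfd_fget(ioeventfd);
        if (IS_ERR(filp))
            return PTR_ERR(filp);
        /* Consumed by the queue proc to pick the waker for this entry. */
        poll_ctx->next_wake_fun = wake_fun;
        mask = vfs_poll(filp, &poll_ctx->wait_table);
        if (mask & EPOLLERR) {
            fput(filp);
            return -EINVAL;
        }
        return 0;
    }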
799 poll_ctx->next_wake_fun = direct_tx ? in netmap_sync_kloop()
801 poll_wait(priv->np_filp, priv->np_si[NR_TX], in netmap_sync_kloop()
802 &poll_ctx->wait_table); in netmap_sync_kloop()
803 poll_ctx->next_entry++; in netmap_sync_kloop()
805 poll_ctx->next_wake_fun = direct_rx ? in netmap_sync_kloop()
807 poll_wait(priv->np_filp, priv->np_si[NR_RX], in netmap_sync_kloop()
808 &poll_ctx->wait_table); in netmap_sync_kloop()
809 poll_ctx->next_entry++; in netmap_sync_kloop()
813 opt->nro_status = EOPNOTSUPP; in netmap_sync_kloop()
824 if (unlikely(NM_ACCESS_ONCE(priv->np_kloop_state) & NM_SYNC_KLOOP_STOPPING)) { in netmap_sync_kloop()
875 for (i = 0; i < poll_ctx->next_entry; i++) { in netmap_sync_kloop()
877 poll_ctx->entries + i; in netmap_sync_kloop()
879 if (entry->wqh) in netmap_sync_kloop()
880 remove_wait_queue(entry->wqh, &entry->wait); in netmap_sync_kloop()
884 if (entry->filp && entry->filp != priv->np_filp) in netmap_sync_kloop()
885 fput(entry->filp); in netmap_sync_kloop()
886 if (entry->irq_ctx) in netmap_sync_kloop()
887 eventfd_ctx_put(entry->irq_ctx); in netmap_sync_kloop()
888 if (entry->irq_filp) in netmap_sync_kloop()
889 fput(entry->irq_filp); in netmap_sync_kloop()
903 priv->np_kloop_state = 0; in netmap_sync_kloop()
905 na->na_flags |= NAF_BDG_MAYSLEEP; in netmap_sync_kloop()
919 if (priv->np_nifp == NULL) { in netmap_sync_kloop_stop()
924 na = priv->np_na; in netmap_sync_kloop_stop()
931 priv->np_kloop_state |= NM_SYNC_KLOOP_STOPPING; in netmap_sync_kloop_stop()
937 nm_os_selwakeup(priv->np_si[NR_RX]); in netmap_sync_kloop_stop()
943 running = (NM_ACCESS_ONCE(priv->np_kloop_state) in netmap_sync_kloop_stop()
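The stop path at lines 931-943 sets a STOPPING bit, wakes the loop through its normal notification channels, then waits for the RUNNING bit to clear. A condensed user-space model with illustrative bit values:

    #include <stdatomic.h>
    #include <stdint.h>

    #define KLOOP_RUNNING  (1u << 0)   /* assumed bit assignments */
    #define KLOOP_STOPPING (1u << 1)

    static void
    kloop_stop_sketch(_Atomic uint32_t *np_kloop_state)
    {
        /* Publish the stop request, then kick the loop (selwakeup or
         * eventfd, as at line 937) so it notices the flag promptly. */
        atomic_fetch_or(np_kloop_state, KLOOP_STOPPING);
        /* Wait for the loop to clear RUNNING on its way out; the real
         * code sleeps between checks (line 943 rereads the state). */
        while (atomic_load(np_kloop_state) & KLOOP_RUNNING)
            ;
    }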
961 * Guest user wants to transmit packets up to the one before ring->head,
962 * and guest kernel knows tx_ring->hwcur is the first unsent packet
978 atok->appl_need_kick = 0; in netmap_pt_guest_txsync()
984 kring->nr_hwcur = ktoa->hwcur; in netmap_pt_guest_txsync()
985 nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead); in netmap_pt_guest_txsync()
988 if (((kring->rhead != kring->nr_hwcur || nm_kr_wouldblock(kring)) in netmap_pt_guest_txsync()
989 && NM_ACCESS_ONCE(ktoa->kern_need_kick)) || in netmap_pt_guest_txsync()
991 atok->sync_flags = flags; in netmap_pt_guest_txsync()
999 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, in netmap_pt_guest_txsync()
1000 &kring->nr_hwcur); in netmap_pt_guest_txsync()
1008 if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) { in netmap_pt_guest_txsync()
1009 /* Re-enable notifications. */ in netmap_pt_guest_txsync()
1010 atok->appl_need_kick = 1; in netmap_pt_guest_txsync()
1011 /* Double check, with store-load memory barrier. */ in netmap_pt_guest_txsync()
1013 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, in netmap_pt_guest_txsync()
1014 &kring->nr_hwcur); in netmap_pt_guest_txsync()
1017 atok->appl_need_kick = 0; in netmap_pt_guest_txsync()
1022 kring->name, atok->head, atok->cur, ktoa->hwtail, in netmap_pt_guest_txsync()
1023 kring->rhead, kring->rcur, kring->nr_hwtail); in netmap_pt_guest_txsync()
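The guest txsync path publishes cur/head through nm_sync_kloop_appl_write() (lines 985 and 1061): the mirror image of the kernel write sketched earlier, with cur stored before head behind store barriers. A user-space sketch, with GCC fences assumed in place of netmap's nm_stst_barrier():

    #include <stdint.h>

    struct atok_publish_sketch {
        uint32_t head;
        uint32_t cur;
    };

    static void
    appl_write_sketch(struct atok_publish_sketch *atok, uint32_t cur,
                      uint32_t head)
    {
        /* Ring-slot writes must not be reordered after cur/head. */
        __atomic_thread_fence(__ATOMIC_RELEASE);
        atok->cur = cur;
        /* head must never appear fresher than cur to the kloop. */
        __atomic_thread_fence(__ATOMIC_RELEASE);
        atok->head = head;
    }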
1031 * Update hwcur/hwtail from host (reading from CSB).
1033 * If guest user has released buffers up to the one before ring->head, we
1046 atok->appl_need_kick = 0; in netmap_pt_guest_rxsync()
1053 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur); in netmap_pt_guest_rxsync()
1054 kring->nr_kflags &= ~NKR_PENDINTR; in netmap_pt_guest_rxsync()
1060 if (kring->rhead != kring->nr_hwcur) { in netmap_pt_guest_rxsync()
1061 nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead); in netmap_pt_guest_rxsync()
1069 if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) { in netmap_pt_guest_rxsync()
1070 /* Re-enable notifications. */ in netmap_pt_guest_rxsync()
1071 atok->appl_need_kick = 1; in netmap_pt_guest_rxsync()
1072 /* Double check, with store-load memory barrier. */ in netmap_pt_guest_rxsync()
1074 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, in netmap_pt_guest_rxsync()
1075 &kring->nr_hwcur); in netmap_pt_guest_rxsync()
1078 atok->appl_need_kick = 0; in netmap_pt_guest_rxsync()
1083 if ((kring->rhead != kring->nr_hwcur || nm_kr_wouldblock(kring)) in netmap_pt_guest_rxsync()
1084 && NM_ACCESS_ONCE(ktoa->kern_need_kick)) { in netmap_pt_guest_rxsync()
1085 atok->sync_flags = flags; in netmap_pt_guest_rxsync()
1090 kring->name, atok->head, atok->cur, ktoa->hwtail, in netmap_pt_guest_rxsync()
1091 kring->rhead, kring->rcur, kring->nr_hwtail); in netmap_pt_guest_rxsync()
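The RX kick decision at lines 1083-1085, isolated as a predicate: kick the host only when there is something for it to do and it has asked to be told. Names are illustrative.

    #include <stdint.h>

    static inline int
    rx_should_kick_sketch(uint32_t rhead, uint32_t nr_hwcur,
                          int wouldblock, int kern_need_kick)
    {
        /* Buffers were released (head moved past hwcur) or the caller
         * is about to block, and the host requested kicks. */
        return (rhead != nr_hwcur || wouldblock) && kern_need_kick;
    }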
1104 struct netmap_adapter *na_nm = &ptna->hwup.up; in ptnet_nm_krings_create()
1105 struct netmap_adapter *na_dr = &ptna->dr.up; in ptnet_nm_krings_create()
1108 if (ptna->backend_users) { in ptnet_nm_krings_create()
1119 na_dr->tx_rings = na_nm->tx_rings; in ptnet_nm_krings_create()
1120 na_dr->rx_rings = na_nm->rx_rings; in ptnet_nm_krings_create()
1130 struct netmap_adapter *na_nm = &ptna->hwup.up; in ptnet_nm_krings_delete()
1131 struct netmap_adapter *na_dr = &ptna->dr.up; in ptnet_nm_krings_delete()
1133 if (ptna->backend_users) { in ptnet_nm_krings_delete()
1137 na_dr->tx_rings = NULL; in ptnet_nm_krings_delete()
1138 na_dr->rx_rings = NULL; in ptnet_nm_krings_delete()
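The create/delete pairs at lines 1119-1120 and 1137-1138 alias one set of krings between the hardware adapter and the pass-through adapter. A toy model of that sharing: teardown only clears the borrowed pointers, so the rings are never freed twice.

    #include <stddef.h>

    struct adapter_sketch {
        void **tx_rings;
        void **rx_rings;
    };

    static void
    share_krings_sketch(struct adapter_sketch *na_dr,
                        struct adapter_sketch *na_nm)
    {
        na_dr->tx_rings = na_nm->tx_rings;   /* borrow, don't allocate */
        na_dr->rx_rings = na_nm->rx_rings;
    }

    static void
    unshare_krings_sketch(struct adapter_sketch *na_dr)
    {
        na_dr->tx_rings = NULL;              /* drop the alias only */
        na_dr->rx_rings = NULL;
    }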
1149 netmap_mem_put(ptna->dr.up.nm_mem); in ptnet_nm_dtor()
1150 memset(&ptna->dr, 0, sizeof(ptna->dr)); in ptnet_nm_dtor()
1151 netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp); in ptnet_nm_dtor()
1159 if_t ifp = arg ? arg->ifp : NULL; in netmap_pt_guest_attach()
1163 arg->nm_mem = netmap_mem_pt_guest_new(ifp, nifp_offset, memid); in netmap_pt_guest_attach()
1164 if (arg->nm_mem == NULL) in netmap_pt_guest_attach()
1166 arg->na_flags |= NAF_MEM_OWNER; in netmap_pt_guest_attach()
1174 /* Initialize a separate pass-through netmap adapter that is going to in netmap_pt_guest_attach()
1177 memset(&ptna->dr, 0, sizeof(ptna->dr)); in netmap_pt_guest_attach()
1178 ptna->dr.up.ifp = ifp; in netmap_pt_guest_attach()
1179 ptna->dr.up.nm_mem = netmap_mem_get(ptna->hwup.up.nm_mem); in netmap_pt_guest_attach()
1180 ptna->dr.up.nm_config = ptna->hwup.up.nm_config; in netmap_pt_guest_attach()
1182 ptna->backend_users = 0; in netmap_pt_guest_attach()