svn commit: r356755 - in head/sys: net netinet netinet6 netpfil/ipfw/nat64 sys

Gleb Smirnoff glebius at FreeBSD.org
Wed Jan 15 06:05:23 UTC 2020


Author: glebius
Date: Wed Jan 15 06:05:20 2020
New Revision: 356755
URL: https://svnweb.freebsd.org/changeset/base/356755

Log:
  Introduce the NET_EPOCH_CALL() macro and use it everywhere we free
  data based on the network epoch.  The macro reverses the argument
  order of epoch_call(9): first the function, then its argument.  NFC
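
  For illustration, a minimal sketch of the call-site conversion (the
  foo_free() callback and foo_epoch_ctx field are hypothetical names,
  modeled on the hunks below):

	/* Before: epoch_call(9) takes the epoch, then the context,
	 * then the callback. */
	epoch_call(net_epoch_preempt, &sc->foo_epoch_ctx, foo_free);

	/* After: the network epoch is implied and the callback
	 * comes first. */
	NET_EPOCH_CALL(foo_free, &sc->foo_epoch_ctx);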

Modified:
  head/sys/net/bpf.c
  head/sys/net/if.c
  head/sys/net/if_gre.c
  head/sys/net/if_lagg.c
  head/sys/net/if_vlan.c
  head/sys/netinet/in.c
  head/sys/netinet/in_pcb.c
  head/sys/netinet/ip_gre.c
  head/sys/netinet/tcp_ratelimit.c
  head/sys/netinet6/in6.c
  head/sys/netinet6/ip6_gre.c
  head/sys/netpfil/ipfw/nat64/nat64lsn.c
  head/sys/sys/epoch.h

Modified: head/sys/net/bpf.c
==============================================================================
--- head/sys/net/bpf.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/net/bpf.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -274,10 +274,10 @@ static struct filterops bpfread_filtops = {
  *
  * 2. A userland application uses ioctl() calls on a bpf_d descriptor.
  * All such calls are serialized with the global lock. BPF filters can be
- * changed, but pointer to old filter will be freed using epoch_call().
+ * changed, but the pointer to the old filter will be freed using NET_EPOCH_CALL().
  * Thus it should be safe for bpf_tap/bpf_mtap* code to access the
  * filter pointers, even if a change happens during bpf_tap execution.
- * Destroying of bpf_d descriptor also is doing using epoch_call().
+ * Destroying a bpf_d descriptor is also done using NET_EPOCH_CALL().
  *
  * 3. A userland application can write packets to a bpf_d descriptor.
  * There we need to be sure that the ifnet won't disappear during bpfwrite().
@@ -288,7 +288,7 @@ static struct filterops bpfread_filtops = {
  *
  * 5. The kernel invokes bpfdetach() on interface destruction. All lists
  * are modified with the global lock held and the actual free() is done using
- * epoch_call().
+ * NET_EPOCH_CALL().
  */
 
 static void
@@ -314,7 +314,7 @@ bpfif_rele(struct bpf_if *bp)
 
 	if (!refcount_release(&bp->bif_refcnt))
 		return;
-	epoch_call(net_epoch_preempt, &bp->epoch_ctx, bpfif_free);
+	NET_EPOCH_CALL(bpfif_free, &bp->epoch_ctx);
 }
 
 static void
@@ -330,7 +330,7 @@ bpfd_rele(struct bpf_d *d)
 
 	if (!refcount_release(&d->bd_refcnt))
 		return;
-	epoch_call(net_epoch_preempt, &d->epoch_ctx, bpfd_free);
+	NET_EPOCH_CALL(bpfd_free, &d->epoch_ctx);
 }
 
 static struct bpf_program_buffer*
@@ -2036,8 +2036,7 @@ bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_lo
 	BPFD_UNLOCK(d);
 
 	if (fcode != NULL)
-		epoch_call(net_epoch_preempt, &fcode->epoch_ctx,
-		    bpf_program_buffer_free);
+		NET_EPOCH_CALL(bpf_program_buffer_free, &fcode->epoch_ctx);
 
 	if (track_event)
 		EVENTHANDLER_INVOKE(bpf_track,
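
The locking scheme described in the bpf.c comment above reduces to the
following minimal sketch (struct foo, foo_head, use() and foo_free()
are hypothetical stand-ins, not names from this diff):

	struct foo {
		CK_LIST_ENTRY(foo)	foo_next;
		struct epoch_context	foo_epoch_ctx;
	};

	/* Reader: list entries stay valid until NET_EPOCH_EXIT(). */
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(fp, &foo_head, foo_next)
		use(fp);
	NET_EPOCH_EXIT(et);

	/* Writer, global lock held: unlink now, free only after all
	 * readers currently inside the epoch have exited. */
	CK_LIST_REMOVE(fp, foo_next);
	NET_EPOCH_CALL(foo_free, &fp->foo_epoch_ctx);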

Modified: head/sys/net/if.c
==============================================================================
--- head/sys/net/if.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/net/if.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -654,7 +654,7 @@ if_free(struct ifnet *ifp)
 	IFNET_WUNLOCK();
 
 	if (refcount_release(&ifp->if_refcount))
-		epoch_call(net_epoch_preempt, &ifp->if_epoch_ctx, if_destroy);
+		NET_EPOCH_CALL(if_destroy, &ifp->if_epoch_ctx);
 	CURVNET_RESTORE();
 }
 
@@ -677,7 +677,7 @@ if_rele(struct ifnet *ifp)
 
 	if (!refcount_release(&ifp->if_refcount))
 		return;
-	epoch_call(net_epoch_preempt, &ifp->if_epoch_ctx, if_destroy);
+	NET_EPOCH_CALL(if_destroy, &ifp->if_epoch_ctx);
 }
 
 void
@@ -1826,7 +1826,7 @@ ifa_free(struct ifaddr *ifa)
 {
 
 	if (refcount_release(&ifa->ifa_refcnt))
-		epoch_call(net_epoch_preempt, &ifa->ifa_epoch_ctx, ifa_destroy);
+		NET_EPOCH_CALL(ifa_destroy, &ifa->ifa_epoch_ctx);
 }
 
 
@@ -3410,7 +3410,7 @@ if_freemulti(struct ifmultiaddr *ifma)
 	KASSERT(ifma->ifma_refcount == 0, ("if_freemulti_epoch: refcount %d",
 	    ifma->ifma_refcount));
 
-	epoch_call(net_epoch_preempt, &ifma->ifma_epoch_ctx, if_destroymulti);
+	NET_EPOCH_CALL(if_destroymulti, &ifma->ifma_epoch_ctx);
 }
 
 

Modified: head/sys/net/if_gre.c
==============================================================================
--- head/sys/net/if_gre.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/net/if_gre.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -392,7 +392,7 @@ gre_delete_tunnel(struct gre_softc *sc)
 	if ((gs = sc->gre_so) != NULL && CK_LIST_EMPTY(&gs->list)) {
 		CK_LIST_REMOVE(gs, chain);
 		soclose(gs->so);
-		epoch_call(net_epoch_preempt, &gs->epoch_ctx, gre_sofree);
+		NET_EPOCH_CALL(gre_sofree, &gs->epoch_ctx);
 		sc->gre_so = NULL;
 	}
 	GRE2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING;

Modified: head/sys/net/if_lagg.c
==============================================================================
--- head/sys/net/if_lagg.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/net/if_lagg.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -910,7 +910,7 @@ lagg_port_destroy(struct lagg_port *lp, int rundelport
	 * free the port and release its ifnet reference after a grace period has
 	 * elapsed.
 	 */
-	epoch_call(net_epoch_preempt, &lp->lp_epoch_ctx, lagg_port_destroy_cb);
+	NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx);
 	/* Update lagg capabilities */
 	lagg_capabilities(sc);
 	lagg_linkstate(sc);

Modified: head/sys/net/if_vlan.c
==============================================================================
--- head/sys/net/if_vlan.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/net/if_vlan.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -584,7 +584,7 @@ vlan_setmulti(struct ifnet *ifp)
 	while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
 		CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
 		(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
-		epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
+		NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx);
 	}
 
 	/* Now program new ones. */
@@ -1543,7 +1543,7 @@ vlan_unconfig_locked(struct ifnet *ifp, int departing)
 					    error);
 			}
 			CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
-			epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
+			NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx);
 		}
 
 		vlan_setflags(ifp, 0); /* clear special flags on parent */

Modified: head/sys/netinet/in.c
==============================================================================
--- head/sys/netinet/in.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/netinet/in.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -1087,7 +1087,7 @@ in_lltable_destroy_lle(struct llentry *lle)
 {
 
 	LLE_WUNLOCK(lle);
-	epoch_call(net_epoch_preempt,  &lle->lle_epoch_ctx, in_lltable_destroy_lle_unlocked);
+	NET_EPOCH_CALL(in_lltable_destroy_lle_unlocked, &lle->lle_epoch_ctx);
 }
 
 static struct llentry *
@@ -1348,7 +1348,7 @@ in_lltable_alloc(struct lltable *llt, u_int flags, con
 		linkhdrsize = LLE_MAX_LINKHDR;
 		if (lltable_calc_llheader(ifp, AF_INET, IF_LLADDR(ifp),
 		    linkhdr, &linkhdrsize, &lladdr_off) != 0) {
-			epoch_call(net_epoch_preempt,  &lle->lle_epoch_ctx, in_lltable_destroy_lle_unlocked);
+			NET_EPOCH_CALL(in_lltable_destroy_lle_unlocked, &lle->lle_epoch_ctx);
 			return (NULL);
 		}
 		lltable_set_entry_addr(ifp, lle, linkhdr, linkhdrsize,

Modified: head/sys/netinet/in_pcb.c
==============================================================================
--- head/sys/netinet/in_pcb.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/netinet/in_pcb.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -269,8 +269,7 @@ in_pcblbgroup_free(struct inpcblbgroup *grp)
 {
 
 	CK_LIST_REMOVE(grp, il_list);
-	epoch_call(net_epoch_preempt, &grp->il_epoch_ctx,
-	    in_pcblbgroup_free_deferred);
+	NET_EPOCH_CALL(in_pcblbgroup_free_deferred, &grp->il_epoch_ctx);
 }
 
 static struct inpcblbgroup *
@@ -1663,7 +1662,7 @@ in_pcbfree(struct inpcb *inp)
 	/* mark as destruction in progress */
 	inp->inp_flags2 |= INP_FREED;
 	INP_WUNLOCK(inp);
-	epoch_call(net_epoch_preempt, &inp->inp_epoch_ctx, in_pcbfree_deferred);
+	NET_EPOCH_CALL(in_pcbfree_deferred, &inp->inp_epoch_ctx);
 }
 
 /*
@@ -1704,7 +1703,7 @@ in_pcbdrop(struct inpcb *inp)
 		CK_LIST_REMOVE(inp, inp_portlist);
 		if (CK_LIST_FIRST(&phd->phd_pcblist) == NULL) {
 			CK_LIST_REMOVE(phd, phd_hash);
-			epoch_call(net_epoch_preempt, &phd->phd_epoch_ctx, inpcbport_free);
+			NET_EPOCH_CALL(inpcbport_free, &phd->phd_epoch_ctx);
 		}
 		INP_HASH_WUNLOCK(inp->inp_pcbinfo);
 		inp->inp_flags &= ~INP_INHASHLIST;
@@ -2674,7 +2673,7 @@ in_pcbremlists(struct inpcb *inp)
 		CK_LIST_REMOVE(inp, inp_portlist);
 		if (CK_LIST_FIRST(&phd->phd_pcblist) == NULL) {
 			CK_LIST_REMOVE(phd, phd_hash);
-			epoch_call(net_epoch_preempt, &phd->phd_epoch_ctx, inpcbport_free);
+			NET_EPOCH_CALL(inpcbport_free, &phd->phd_epoch_ctx);
 		}
 		INP_HASH_WUNLOCK(pcbinfo);
 		inp->inp_flags &= ~INP_INHASHLIST;

Modified: head/sys/netinet/ip_gre.c
==============================================================================
--- head/sys/netinet/ip_gre.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/netinet/ip_gre.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -235,7 +235,7 @@ in_gre_udp_input(struct mbuf *m, int off, struct inpcb
	 * If the socket was closed before we entered the NET_EPOCH section,
	 * the INP_FREED flag should be set. Otherwise it should be safe to
	 * access the ctx data, because gre_so will be freed by
-	 * gre_sofree() via epoch_call().
+	 * gre_sofree() via NET_EPOCH_CALL().
 	 */
 	if (__predict_false(inp->inp_flags2 & INP_FREED)) {
 		NET_EPOCH_EXIT(et);
@@ -284,8 +284,7 @@ in_gre_setup_socket(struct gre_softc *sc)
 			if (CK_LIST_EMPTY(&gs->list)) {
 				CK_LIST_REMOVE(gs, chain);
 				soclose(gs->so);
-				epoch_call(net_epoch_preempt, &gs->epoch_ctx,
-				    gre_sofree);
+				NET_EPOCH_CALL(gre_sofree, &gs->epoch_ctx);
 			}
 			gs = sc->gre_so = NULL;
 		}

Modified: head/sys/netinet/tcp_ratelimit.c
==============================================================================
--- head/sys/netinet/tcp_ratelimit.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/netinet/tcp_ratelimit.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -286,7 +286,7 @@ rs_defer_destroy(struct tcp_rate_set *rs)
 
 	/* Set flag to only defer once. */
 	rs->rs_flags |= RS_FUNERAL_SCHD;
-	epoch_call(net_epoch_preempt, &rs->rs_epoch_ctx, rs_destroy);
+	NET_EPOCH_CALL(rs_destroy, &rs->rs_epoch_ctx);
 }
 
 #ifdef INET

Modified: head/sys/netinet6/in6.c
==============================================================================
--- head/sys/netinet6/in6.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/netinet6/in6.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -2061,7 +2061,7 @@ in6_lltable_destroy_lle(struct llentry *lle)
 {
 
 	LLE_WUNLOCK(lle);
-	epoch_call(net_epoch_preempt,  &lle->lle_epoch_ctx, in6_lltable_destroy_lle_unlocked);
+	NET_EPOCH_CALL(in6_lltable_destroy_lle_unlocked, &lle->lle_epoch_ctx);
 }
 
 static struct llentry *
@@ -2282,7 +2282,7 @@ in6_lltable_alloc(struct lltable *llt, u_int flags,
 		linkhdrsize = LLE_MAX_LINKHDR;
 		if (lltable_calc_llheader(ifp, AF_INET6, IF_LLADDR(ifp),
 		    linkhdr, &linkhdrsize, &lladdr_off) != 0) {
-			epoch_call(net_epoch_preempt,  &lle->lle_epoch_ctx, in6_lltable_destroy_lle_unlocked);
+			NET_EPOCH_CALL(in6_lltable_destroy_lle_unlocked, &lle->lle_epoch_ctx);
 			return (NULL);
 		}
 		lltable_set_entry_addr(ifp, lle, linkhdr, linkhdrsize,

Modified: head/sys/netinet6/ip6_gre.c
==============================================================================
--- head/sys/netinet6/ip6_gre.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/netinet6/ip6_gre.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -228,7 +228,7 @@ in6_gre_udp_input(struct mbuf *m, int off, struct inpc
	 * If the socket was closed before we entered the NET_EPOCH section,
	 * the INP_FREED flag should be set. Otherwise it should be safe to
	 * access the ctx data, because gre_so will be freed by
-	 * gre_sofree() via epoch_call().
+	 * gre_sofree() via NET_EPOCH_CALL().
 	 */
 	if (__predict_false(inp->inp_flags2 & INP_FREED)) {
 		NET_EPOCH_EXIT(et);
@@ -280,8 +280,7 @@ in6_gre_setup_socket(struct gre_softc *sc)
 			if (CK_LIST_EMPTY(&gs->list)) {
 				CK_LIST_REMOVE(gs, chain);
 				soclose(gs->so);
-				epoch_call(net_epoch_preempt, &gs->epoch_ctx,
-				    gre_sofree);
+				NET_EPOCH_CALL(gre_sofree, &gs->epoch_ctx);
 			}
 			gs = sc->gre_so = NULL;
 		}

Modified: head/sys/netpfil/ipfw/nat64/nat64lsn.c
==============================================================================
--- head/sys/netpfil/ipfw/nat64/nat64lsn.c	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/netpfil/ipfw/nat64/nat64lsn.c	Wed Jan 15 06:05:20 2020	(r356755)
@@ -75,7 +75,7 @@ MALLOC_DEFINE(M_NAT64LSN, "NAT64LSN", "NAT64LSN");
 #define	NAT64LSN_EPOCH_ENTER(et)  NET_EPOCH_ENTER(et)
 #define	NAT64LSN_EPOCH_EXIT(et)   NET_EPOCH_EXIT(et)
 #define	NAT64LSN_EPOCH_ASSERT()   NET_EPOCH_ASSERT()
-#define	NAT64LSN_EPOCH_CALL(c, f) epoch_call(net_epoch_preempt, (c), (f))
+#define	NAT64LSN_EPOCH_CALL(c, f) NET_EPOCH_CALL((f), (c))
 
 static uma_zone_t nat64lsn_host_zone;
 static uma_zone_t nat64lsn_pgchunk_zone;

Modified: head/sys/sys/epoch.h
==============================================================================
--- head/sys/sys/epoch.h	Wed Jan 15 05:48:36 2020	(r356754)
+++ head/sys/sys/epoch.h	Wed Jan 15 06:05:20 2020	(r356755)
@@ -101,6 +101,7 @@ extern epoch_t net_epoch_preempt;
 #define	NET_EPOCH_ENTER(et)	epoch_enter_preempt(net_epoch_preempt, &(et))
 #define	NET_EPOCH_EXIT(et)	epoch_exit_preempt(net_epoch_preempt, &(et))
 #define	NET_EPOCH_WAIT()	epoch_wait_preempt(net_epoch_preempt)
+#define	NET_EPOCH_CALL(f, c)	epoch_call(net_epoch_preempt, (c), (f))
 #define	NET_EPOCH_ASSERT()	MPASS(in_epoch(net_epoch_preempt))
 
 #endif	/* _KERNEL */
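
A callback passed to NET_EPOCH_CALL() receives the epoch_context_t
back and typically recovers its enclosing object with __containerof();
a minimal sketch, assuming a hypothetical foo_softc (the idiom matches
callbacks such as bpfif_free() in this tree):

	static void
	foo_free(epoch_context_t ctx)
	{
		struct foo_softc *sc;

		sc = __containerof(ctx, struct foo_softc, foo_epoch_ctx);
		free(sc, M_DEVBUF);
	}

	/* Runs only after all current net-epoch sections have exited. */
	NET_EPOCH_CALL(foo_free, &sc->foo_epoch_ctx);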

