git: e9da71cd35d4 - main - vtnet: Better adjust for ethernet alignment.
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Thu, 28 Dec 2023 06:25:57 UTC
The branch main has been updated by imp: URL: https://cgit.FreeBSD.org/src/commit/?id=e9da71cd35d46ca13da4396d99e0af1703290e68 commit e9da71cd35d46ca13da4396d99e0af1703290e68 Author: Warner Losh <imp@FreeBSD.org> AuthorDate: 2023-12-21 20:36:12 +0000 Commit: Warner Losh <imp@FreeBSD.org> CommitDate: 2023-12-28 06:25:53 +0000 vtnet: Better adjust for ethernet alignment. Move adjustment of the mbuf from where we allocate it to where we are about to queue it to the device. Do this only on those platforms that require it. This allows us to receive an entire jumbo frame on other platforms. It also doesn't make the adjustment on subsequent frames when we queue multiple mbufs for LRO operations. For the normal use case on armv7, there's no difference because we only ever allocate one mbuf. However, for the LRO cases it increases what's available in LRO. It also ensures that we get enough mbufs in those cases as well (though I have no ability to test this on an LRO scenario with armv7). This has the side effect of reverting 527b62e37e68. Fixes: 527b62e37e68 Sponsored by: Netflix --- sys/dev/virtio/network/if_vtnet.c | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c index 360176e4f845..db7a1a18ebc2 100644 --- a/sys/dev/virtio/network/if_vtnet.c +++ b/sys/dev/virtio/network/if_vtnet.c @@ -1532,8 +1532,8 @@ vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) m_freem(m_head); return (NULL); } + m->m_len = size; - m_adj(m, ETHER_ALIGN); if (m_head != NULL) { m_tail->m_next = m; m_tail = m; @@ -1587,6 +1587,15 @@ vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0, ("%s: mbuf size %d not expected cluster size %d", __func__, m->m_len, clustersz)); +#ifdef __NO_STRICT_ALIGNMENT + /* + * Need to offset the first mbuf in this chain to align the IP + * structure because this host requires strict alignment. 
+ */ + if (m_prev == NULL) { + m_adj(m, ETHER_ALIGN); + } +#endif m->m_len = MIN(m->m_len, len); len -= m->m_len; @@ -1655,6 +1664,13 @@ vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len) if (m_new == NULL) return (ENOBUFS); +#ifdef __NO_STRICT_ALIGNMENT + /* + * Need to offset the first mbuf in this chain to align the IP + * structure because this host requires strict alignment. + */ + m_adj(m_new, ETHER_ALIGN); +#endif error = vtnet_rxq_enqueue_buf(rxq, m_new); if (error) { sc->vtnet_stats.rx_enq_replacement_failed++; @@ -1722,6 +1738,13 @@ vtnet_rxq_new_buf(struct vtnet_rxq *rxq) if (m == NULL) return (ENOBUFS); +#ifdef __NO_STRICT_ALIGNMENT + /* + * Need to offset the first mbuf in this chain to align the IP + * structure because this host requires strict alignment. + */ + m_adj(m, ETHER_ALIGN); +#endif error = vtnet_rxq_enqueue_buf(rxq, m); if (error) m_freem(m); @@ -1907,8 +1930,10 @@ vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m) int error __diagused; /* - * Requeue the discarded mbuf. This should always be successful - * since it was just dequeued. + * Requeue the discarded mbuf. This should always be successful since it + * was just dequeued. There's no need to adjust for ethernet alignment + * here on strict alignment hosts because we're requeueing a packet + * already adjusted. */ error = vtnet_rxq_enqueue_buf(rxq, m); KASSERT(error == 0,