svn commit: r360569 - in head/sys: dev/cxgbe dev/cxgbe/crypto dev/cxgbe/tom kern sys

Gleb Smirnoff glebius at FreeBSD.org
Sat May 2 22:39:29 UTC 2020


Author: glebius
Date: Sat May  2 22:39:26 2020
New Revision: 360569
URL: https://svnweb.freebsd.org/changeset/base/360569

Log:
  Continuation of multi page mbuf redesign from r359919.
  
  The following series of patches addresses three things:
  
  Now that array of pages is embedded into mbuf, we no longer need
  separate structure to pass around, so struct mbuf_ext_pgs is an
  artifact of the first implementation. And struct mbuf_ext_pgs_data
  is a crutch to accommodate the main idea of r359919 with minimal churn.
  
  Also, M_EXT of type EXT_PGS is just a synonym of M_NOMAP.
  
  The namespace for the new feature is somewhat inconsistent and
  sometimes has lengthy prefixes. In these patches we will
  gradually bring the namespace to "m_epg" prefix for all mbuf
  fields and most functions.
  
  Step 1 of 4:
  
   o Anonymize mbuf_ext_pgs_data, embed in m_ext
   o Embed mbuf_ext_pgs
   o Start documenting all this entanglement
  
  Reviewed by:	gallatin
  Differential Revision:	https://reviews.freebsd.org/D24598

Modified:
  head/sys/dev/cxgbe/crypto/t4_kern_tls.c
  head/sys/dev/cxgbe/t4_sge.c
  head/sys/dev/cxgbe/tom/t4_cpl_io.c
  head/sys/dev/cxgbe/tom/t4_tls.c
  head/sys/kern/kern_mbuf.c
  head/sys/kern/kern_sendfile.c
  head/sys/kern/subr_bus_dma.c
  head/sys/kern/subr_sglist.c
  head/sys/kern/uipc_ktls.c
  head/sys/kern/uipc_mbuf.c
  head/sys/sys/mbuf.h
  head/sys/sys/sglist.h

Modified: head/sys/dev/cxgbe/crypto/t4_kern_tls.c
==============================================================================
--- head/sys/dev/cxgbe/crypto/t4_kern_tls.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/dev/cxgbe/crypto/t4_kern_tls.c	Sat May  2 22:39:26 2020	(r360569)
@@ -906,7 +906,7 @@ ktls_tcp_payload_length(struct tlspcb *tlsp, struct mb
 
 	MBUF_EXT_PGS_ASSERT(m_tls);
 	ext_pgs = &m_tls->m_ext_pgs;
-	hdr = (void *)ext_pgs->m_epg_hdr;
+	hdr = (void *)m_tls->m_epg_hdr;
 	plen = ntohs(hdr->tls_length);
 
 	/*
@@ -962,7 +962,7 @@ ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *
 
 	MBUF_EXT_PGS_ASSERT(m_tls);
 	ext_pgs = &m_tls->m_ext_pgs;
-	hdr = (void *)ext_pgs->m_epg_hdr;
+	hdr = (void *)m_tls->m_epg_hdr;
 	plen = ntohs(hdr->tls_length);
 #ifdef INVARIANTS
 	mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
@@ -1040,7 +1040,7 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struc
 		return (wr_len);
 	}
 
-	hdr = (void *)ext_pgs->m_epg_hdr;
+	hdr = (void *)m_tls->m_epg_hdr;
 	plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - ext_pgs->trail_len;
 	if (tlen < plen) {
 		plen = tlen;
@@ -1064,7 +1064,7 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struc
 	wr_len += roundup2(imm_len, 16);
 
 	/* TLS record payload via DSGL. */
-	*nsegsp = sglist_count_ext_pgs(ext_pgs, ext_pgs->hdr_len + offset,
+	*nsegsp = sglist_count_ext_pgs(m_tls, ext_pgs->hdr_len + offset,
 	    plen - (ext_pgs->hdr_len + offset));
 	wr_len += ktls_sgl_size(*nsegsp);
 
@@ -1543,7 +1543,7 @@ ktls_write_tunnel_packet(struct sge_txq *txq, void *ds
 	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
 
 	/* Copy the subset of the TLS header requested. */
-	copy_to_txd(&txq->eq, (char *)ext_pgs->m_epg_hdr +
+	copy_to_txd(&txq->eq, (char *)m_tls->m_epg_hdr +
 	    mtod(m_tls, vm_offset_t), &out, m_tls->m_len);
 	txq->imm_wrs++;
 
@@ -1604,7 +1604,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq 
 	/* Locate the TLS header. */
 	MBUF_EXT_PGS_ASSERT(m_tls);
 	ext_pgs = &m_tls->m_ext_pgs;
-	hdr = (void *)ext_pgs->m_epg_hdr;
+	hdr = (void *)m_tls->m_epg_hdr;
 	plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - ext_pgs->trail_len;
 
 	/* Determine how much of the TLS record to send. */
@@ -1799,7 +1799,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq 
 
 	/* Recalculate 'nsegs' if cached value is not available. */
 	if (nsegs == 0)
-		nsegs = sglist_count_ext_pgs(ext_pgs, ext_pgs->hdr_len +
+		nsegs = sglist_count_ext_pgs(m_tls, ext_pgs->hdr_len +
 		    offset, plen - (ext_pgs->hdr_len + offset));
 
 	/* Calculate the size of the TLS work request. */
@@ -2031,7 +2031,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq 
 	/* Populate the TLS header */
 	out = (void *)(tx_data + 1);
 	if (offset == 0) {
-		memcpy(out, ext_pgs->m_epg_hdr, ext_pgs->hdr_len);
+		memcpy(out, m_tls->m_epg_hdr, ext_pgs->hdr_len);
 		out += ext_pgs->hdr_len;
 	}
 
@@ -2067,7 +2067,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq 
 
 	/* SGL for record payload */
 	sglist_reset(txq->gl);
-	if (sglist_append_ext_pgs(txq->gl, ext_pgs, ext_pgs->hdr_len + offset,
+	if (sglist_append_ext_pgs(txq->gl, m_tls, ext_pgs->hdr_len + offset,
 	    plen - (ext_pgs->hdr_len + offset)) != 0) {
 #ifdef INVARIANTS
 		panic("%s: failed to append sglist", __func__);

Modified: head/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- head/sys/dev/cxgbe/t4_sge.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/dev/cxgbe/t4_sge.c	Sat May  2 22:39:26 2020	(r360569)
@@ -2435,7 +2435,7 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_
 			off = 0;
 			len -= seglen;
 			paddr = pmap_kextract(
-			    (vm_offset_t)&ext_pgs->m_epg_hdr[segoff]);
+			    (vm_offset_t)&m->m_epg_hdr[segoff]);
 			if (*nextaddr != paddr)
 				nsegs++;
 			*nextaddr = paddr + seglen;
@@ -2454,7 +2454,7 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_
 		off = 0;
 		seglen = min(seglen, len);
 		len -= seglen;
-		paddr = ext_pgs->m_epg_pa[i] + segoff;
+		paddr = m->m_epg_pa[i] + segoff;
 		if (*nextaddr != paddr)
 			nsegs++;
 		*nextaddr = paddr + seglen;
@@ -2463,7 +2463,7 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_
 	if (len != 0) {
 		seglen = min(len, ext_pgs->trail_len - off);
 		len -= seglen;
-		paddr = pmap_kextract((vm_offset_t)&ext_pgs->m_epg_trail[off]);
+		paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]);
 		if (*nextaddr != paddr)
 			nsegs++;
 		*nextaddr = paddr + seglen;

Modified: head/sys/dev/cxgbe/tom/t4_cpl_io.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_cpl_io.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/dev/cxgbe/tom/t4_cpl_io.c	Sat May  2 22:39:26 2020	(r360569)
@@ -1935,7 +1935,7 @@ aiotx_free_pgs(struct mbuf *m)
 #endif
 
 	for (int i = 0; i < ext_pgs->npgs; i++) {
-		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
+		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
 		vm_page_unwire(pg, PQ_ACTIVE);
 	}
 
@@ -2003,7 +2003,7 @@ alloc_aiotx_mbuf(struct kaiocb *job, int len)
 			    (npages - 2) * PAGE_SIZE;
 		}
 		for (i = 0; i < npages; i++)
-			ext_pgs->m_epg_pa[i] = VM_PAGE_TO_PHYS(pgs[i]);
+			m->m_epg_pa[i] = VM_PAGE_TO_PHYS(pgs[i]);
 
 		m->m_len = mlen;
 		m->m_ext.ext_size = npages * PAGE_SIZE;

Modified: head/sys/dev/cxgbe/tom/t4_tls.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_tls.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/dev/cxgbe/tom/t4_tls.c	Sat May  2 22:39:26 2020	(r360569)
@@ -1623,26 +1623,24 @@ t4_push_tls_records(struct adapter *sc, struct toepcb 
 
 #ifdef KERN_TLS
 static int
-count_ext_pgs_segs(struct mbuf_ext_pgs *ext_pgs,
-	struct mbuf_ext_pgs_data *ext_pgs_data)
+count_ext_pgs_segs(struct mbuf *m)
 {
 	vm_paddr_t nextpa;
 	u_int i, nsegs;
 
-	MPASS(ext_pgs->npgs > 0);
+	MPASS(m->m_ext_pgs.npgs > 0);
 	nsegs = 1;
-	nextpa = ext_pgs_data->pa[0] + PAGE_SIZE;
-	for (i = 1; i < ext_pgs->npgs; i++) {
-		if (nextpa != ext_pgs_data->pa[i])
+	nextpa = m->m_epg_pa[0] + PAGE_SIZE;
+	for (i = 1; i < m->m_ext_pgs.npgs; i++) {
+		if (nextpa != m->m_epg_pa[i])
 			nsegs++;
-		nextpa = ext_pgs_data->pa[i] + PAGE_SIZE;
+		nextpa = m->m_epg_pa[i] + PAGE_SIZE;
 	}
 	return (nsegs);
 }
 
 static void
-write_ktlstx_sgl(void *dst, struct mbuf_ext_pgs *ext_pgs,
-    struct mbuf_ext_pgs_data *ext_pgs_data, int nsegs)
+write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
 {
 	struct ulptx_sgl *usgl = dst;
 	vm_paddr_t pa;
@@ -1655,15 +1653,15 @@ write_ktlstx_sgl(void *dst, struct mbuf_ext_pgs *ext_p
 	    V_ULPTX_NSGE(nsegs));
 
 	/* Figure out the first S/G length. */
-	pa = ext_pgs_data->pa[0] + ext_pgs->first_pg_off;
+	pa = m->m_epg_pa[0] + m->m_ext_pgs.first_pg_off;
 	usgl->addr0 = htobe64(pa);
-	len = mbuf_ext_pg_len(ext_pgs, 0, ext_pgs->first_pg_off);
+	len = mbuf_ext_pg_len(&m->m_ext_pgs, 0, m->m_ext_pgs.first_pg_off);
 	pa += len;
-	for (i = 1; i < ext_pgs->npgs; i++) {
-		if (ext_pgs_data->pa[i] != pa)
+	for (i = 1; i < m->m_ext_pgs.npgs; i++) {
+		if (m->m_epg_pa[i] != pa)
 			break;
-		len += mbuf_ext_pg_len(ext_pgs, i, 0);
-		pa += mbuf_ext_pg_len(ext_pgs, i, 0);
+		len += mbuf_ext_pg_len(&m->m_ext_pgs, i, 0);
+		pa += mbuf_ext_pg_len(&m->m_ext_pgs, i, 0);
 	}
 	usgl->len0 = htobe32(len);
 #ifdef INVARIANTS
@@ -1671,21 +1669,21 @@ write_ktlstx_sgl(void *dst, struct mbuf_ext_pgs *ext_p
 #endif
 
 	j = -1;
-	for (; i < ext_pgs->npgs; i++) {
-		if (j == -1 || ext_pgs_data->pa[i] != pa) {
+	for (; i < m->m_ext_pgs.npgs; i++) {
+		if (j == -1 || m->m_epg_pa[i] != pa) {
 			if (j >= 0)
 				usgl->sge[j / 2].len[j & 1] = htobe32(len);
 			j++;
 #ifdef INVARIANTS
 			nsegs--;
 #endif
-			pa = ext_pgs_data->pa[i];
+			pa = m->m_epg_pa[i];
 			usgl->sge[j / 2].addr[j & 1] = htobe64(pa);
-			len = mbuf_ext_pg_len(ext_pgs, i, 0);
+			len = mbuf_ext_pg_len(&m->m_ext_pgs, i, 0);
 			pa += len;
 		} else {
-			len += mbuf_ext_pg_len(ext_pgs, i, 0);
-			pa += mbuf_ext_pg_len(ext_pgs, i, 0);
+			len += mbuf_ext_pg_len(&m->m_ext_pgs, i, 0);
+			pa += mbuf_ext_pg_len(&m->m_ext_pgs, i, 0);
 		}
 	}
 	if (j >= 0) {
@@ -1694,8 +1692,7 @@ write_ktlstx_sgl(void *dst, struct mbuf_ext_pgs *ext_p
 		if ((j & 1) == 0)
 			usgl->sge[j / 2].len[1] = htobe32(0);
 	}
-	KASSERT(nsegs == 0, ("%s: nsegs %d, ext_pgs %p", __func__, nsegs,
-	    ext_pgs));
+	KASSERT(nsegs == 0, ("%s: nsegs %d, m %p", __func__, nsegs, m));
 }
 
 /*
@@ -1813,8 +1810,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, 
 		wr_len += AES_BLOCK_LEN;
 
 		/* Account for SGL in work request length. */
-		nsegs = count_ext_pgs_segs(&m->m_ext_pgs,
-		    &m->m_ext.ext_pgs);
+		nsegs = count_ext_pgs_segs(m);
 		wr_len += sizeof(struct ulptx_sgl) +
 		    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
 
@@ -1892,8 +1888,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, 
 		memcpy(buf, thdr + 1, toep->tls.iv_len);
 		buf += AES_BLOCK_LEN;
 
-		write_ktlstx_sgl(buf, &m->m_ext_pgs, &m->m_ext.ext_pgs,
-		    nsegs);
+		write_ktlstx_sgl(buf, m, nsegs);
 
 		KASSERT(toep->tx_credits >= credits,
 			("%s: not enough credits", __func__));

Modified: head/sys/kern/kern_mbuf.c
==============================================================================
--- head/sys/kern/kern_mbuf.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/kern/kern_mbuf.c	Sat May  2 22:39:26 2020	(r360569)
@@ -311,9 +311,6 @@ static void	mb_reclaim(uma_zone_t, int);
 /* Ensure that MSIZE is a power of 2. */
 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
 
-_Static_assert(offsetof(struct mbuf, m_ext) ==
-    offsetof(struct mbuf, m_ext_pgs.m_ext),
-    "m_ext offset mismatch between mbuf and ext_pgs");
 _Static_assert(sizeof(struct mbuf) <= MSIZE,
     "size of mbuf exceeds MSIZE");
 /*
@@ -984,7 +981,7 @@ _mb_unmapped_to_ext(struct mbuf *m)
 				goto fail;
 			m_new->m_len = seglen;
 			prev = top = m_new;
-			memcpy(mtod(m_new, void *), &ext_pgs->m_epg_hdr[segoff],
+			memcpy(mtod(m_new, void *), &m->m_epg_hdr[segoff],
 			    seglen);
 		}
 	}
@@ -1002,7 +999,7 @@ _mb_unmapped_to_ext(struct mbuf *m)
 		seglen = min(seglen, len);
 		len -= seglen;
 
-		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
+		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
 		m_new = m_get(M_NOWAIT, MT_DATA);
 		if (m_new == NULL)
 			goto fail;
@@ -1036,7 +1033,7 @@ _mb_unmapped_to_ext(struct mbuf *m)
 		else
 			prev->m_next = m_new;
 		m_new->m_len = len;
-		memcpy(mtod(m_new, void *), &ext_pgs->m_epg_trail[off], len);
+		memcpy(mtod(m_new, void *), &m->m_epg_trail[off], len);
 	}
 
 	if (ref_inc != 0) {
@@ -1154,8 +1151,9 @@ mb_alloc_ext_pgs(int how, m_ext_free_t ext_free)
 
 #ifdef INVARIANT_SUPPORT
 void
-mb_ext_pgs_check(struct mbuf_ext_pgs *ext_pgs)
+mb_ext_pgs_check(struct mbuf *m)
 {
+	struct mbuf_ext_pgs *ext_pgs = &m->m_ext_pgs;
 
 	/*
 	 * NB: This expects a non-empty buffer (npgs > 0 and
@@ -1163,7 +1161,7 @@ mb_ext_pgs_check(struct mbuf_ext_pgs *ext_pgs)
 	 */
 	KASSERT(ext_pgs->npgs > 0,
 	    ("ext_pgs with no valid pages: %p", ext_pgs));
-	KASSERT(ext_pgs->npgs <= nitems(ext_pgs->m_epg_pa),
+	KASSERT(ext_pgs->npgs <= nitems(m->m_epg_pa),
 	    ("ext_pgs with too many pages: %p", ext_pgs));
 	KASSERT(ext_pgs->nrdy <= ext_pgs->npgs,
 	    ("ext_pgs with too many ready pages: %p", ext_pgs));
@@ -1178,9 +1176,9 @@ mb_ext_pgs_check(struct mbuf_ext_pgs *ext_pgs)
 		    PAGE_SIZE, ("ext_pgs with single page too large: %p",
 		    ext_pgs));
 	}
-	KASSERT(ext_pgs->hdr_len <= sizeof(ext_pgs->m_epg_hdr),
+	KASSERT(ext_pgs->hdr_len <= sizeof(m->m_epg_hdr),
 	    ("ext_pgs with too large header length: %p", ext_pgs));
-	KASSERT(ext_pgs->trail_len <= sizeof(ext_pgs->m_epg_trail),
+	KASSERT(ext_pgs->trail_len <= sizeof(m->m_epg_trail),
 	    ("ext_pgs with too large header length: %p", ext_pgs));
 }
 #endif

Modified: head/sys/kern/kern_sendfile.c
==============================================================================
--- head/sys/kern/kern_sendfile.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/kern/kern_sendfile.c	Sat May  2 22:39:26 2020	(r360569)
@@ -203,7 +203,7 @@ sendfile_free_mext_pg(struct mbuf *m)
 	for (i = 0; i < ext_pgs->npgs; i++) {
 		if (cache_last && i == ext_pgs->npgs - 1)
 			flags = 0;
-		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
+		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
 		vm_page_release(pg, flags);
 	}
 
@@ -1046,11 +1046,11 @@ retry_space:
 					ext_pgs->nrdy++;
 				}
 
-				ext_pgs->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
+				m0->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
 				ext_pgs->npgs++;
 				xfs = xfsize(i, npages, off, space);
 				ext_pgs->last_pg_len = xfs;
-				MBUF_EXT_PGS_ASSERT_SANITY(ext_pgs);
+				MBUF_EXT_PGS_ASSERT_SANITY(m0);
 				mtail->m_len += xfs;
 				mtail->m_ext.ext_size += PAGE_SIZE;
 				continue;

Modified: head/sys/kern/subr_bus_dma.c
==============================================================================
--- head/sys/kern/subr_bus_dma.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/kern/subr_bus_dma.c	Sat May  2 22:39:26 2020	(r360569)
@@ -141,7 +141,7 @@ _bus_dmamap_load_unmapped_mbuf_sg(bus_dma_tag_t dmat, 
 			off = 0;
 			len -= seglen;
 			error = _bus_dmamap_load_buffer(dmat, map,
-			    &ext_pgs->m_epg_hdr[segoff], seglen, kernel_pmap,
+			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
 			    flags, segs, nsegs);
 		}
 	}
@@ -159,7 +159,7 @@ _bus_dmamap_load_unmapped_mbuf_sg(bus_dma_tag_t dmat, 
 		seglen = min(seglen, len);
 		len -= seglen;
 		error = _bus_dmamap_load_phys(dmat, map,
-		    ext_pgs->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
+		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
 		pgoff = 0;
 	};
 	if (len != 0 && error == 0) {
@@ -167,7 +167,7 @@ _bus_dmamap_load_unmapped_mbuf_sg(bus_dma_tag_t dmat, 
 		    ("off + len > trail (%d + %d > %d)", off, len,
 		    ext_pgs->trail_len));
 		error = _bus_dmamap_load_buffer(dmat, map,
-		    &ext_pgs->m_epg_trail[off], len, kernel_pmap, flags, segs,
+		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
 		    nsegs);
 	}
 	return (error);

Modified: head/sys/kern/subr_sglist.c
==============================================================================
--- head/sys/kern/subr_sglist.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/kern/subr_sglist.c	Sat May  2 22:39:26 2020	(r360569)
@@ -223,8 +223,9 @@ sglist_count_vmpages(vm_page_t *m, size_t pgoff, size_
  * describe an EXT_PGS buffer.
  */
 int
-sglist_count_ext_pgs(struct mbuf_ext_pgs *ext_pgs, size_t off, size_t len)
+sglist_count_ext_pgs(struct mbuf *m, size_t off, size_t len)
 {
+	struct mbuf_ext_pgs *ext_pgs = &m->m_ext_pgs;
 	vm_paddr_t nextaddr, paddr;
 	size_t seglen, segoff;
 	int i, nsegs, pglen, pgoff;
@@ -242,7 +243,7 @@ sglist_count_ext_pgs(struct mbuf_ext_pgs *ext_pgs, siz
 			seglen = MIN(seglen, len);
 			off = 0;
 			len -= seglen;
-			nsegs += sglist_count(&ext_pgs->m_epg_hdr[segoff],
+			nsegs += sglist_count(&m->m_epg_hdr[segoff],
 			    seglen);
 		}
 	}
@@ -260,7 +261,7 @@ sglist_count_ext_pgs(struct mbuf_ext_pgs *ext_pgs, siz
 		off = 0;
 		seglen = MIN(seglen, len);
 		len -= seglen;
-		paddr = ext_pgs->m_epg_pa[i] + segoff;
+		paddr = m->m_epg_pa[i] + segoff;
 		if (paddr != nextaddr)
 			nsegs++;
 		nextaddr = paddr + seglen;
@@ -269,7 +270,7 @@ sglist_count_ext_pgs(struct mbuf_ext_pgs *ext_pgs, siz
 	if (len != 0) {
 		seglen = MIN(len, ext_pgs->trail_len - off);
 		len -= seglen;
-		nsegs += sglist_count(&ext_pgs->m_epg_trail[off], seglen);
+		nsegs += sglist_count(&m->m_epg_trail[off], seglen);
 	}
 	KASSERT(len == 0, ("len != 0"));
 	return (nsegs);
@@ -284,8 +285,7 @@ sglist_count_mb_ext_pgs(struct mbuf *m)
 {
 
 	MBUF_EXT_PGS_ASSERT(m);
-	return (sglist_count_ext_pgs(&m->m_ext_pgs, mtod(m, vm_offset_t),
-	    m->m_len));
+	return (sglist_count_ext_pgs(m, mtod(m, vm_offset_t), m->m_len));
 }
 
 /*
@@ -395,9 +395,9 @@ sglist_append_phys(struct sglist *sg, vm_paddr_t paddr
  * fails with EFBIG.
  */
 int
-sglist_append_ext_pgs(struct sglist *sg, struct mbuf_ext_pgs *ext_pgs,
-    size_t off, size_t len)
+sglist_append_ext_pgs(struct sglist *sg, struct mbuf *m, size_t off, size_t len)
 {
+	struct mbuf_ext_pgs *ext_pgs = &m->m_ext_pgs;
 	size_t seglen, segoff;
 	vm_paddr_t paddr;
 	int error, i, pglen, pgoff;
@@ -413,7 +413,7 @@ sglist_append_ext_pgs(struct sglist *sg, struct mbuf_e
 			off = 0;
 			len -= seglen;
 			error = sglist_append(sg,
-			    &ext_pgs->m_epg_hdr[segoff], seglen);
+			    &m->m_epg_hdr[segoff], seglen);
 		}
 	}
 	pgoff = ext_pgs->first_pg_off;
@@ -429,7 +429,7 @@ sglist_append_ext_pgs(struct sglist *sg, struct mbuf_e
 		off = 0;
 		seglen = MIN(seglen, len);
 		len -= seglen;
-		paddr = ext_pgs->m_epg_pa[i] + segoff;
+		paddr = m->m_epg_pa[i] + segoff;
 		error = sglist_append_phys(sg, paddr, seglen);
 		pgoff = 0;
 	};
@@ -437,7 +437,7 @@ sglist_append_ext_pgs(struct sglist *sg, struct mbuf_e
 		seglen = MIN(len, ext_pgs->trail_len - off);
 		len -= seglen;
 		error = sglist_append(sg,
-		    &ext_pgs->m_epg_trail[off], seglen);
+		    &m->m_epg_trail[off], seglen);
 	}
 	if (error == 0)
 		KASSERT(len == 0, ("len != 0"));
@@ -455,8 +455,7 @@ sglist_append_mb_ext_pgs(struct sglist *sg, struct mbu
 
 	/* for now, all unmapped mbufs are assumed to be EXT_PGS */
 	MBUF_EXT_PGS_ASSERT(m);
-	return (sglist_append_ext_pgs(sg, &m->m_ext_pgs,
-	    mtod(m, vm_offset_t), m->m_len));
+	return (sglist_append_ext_pgs(sg, m, mtod(m, vm_offset_t), m->m_len));
 }
 
 /*

Modified: head/sys/kern/uipc_ktls.c
==============================================================================
--- head/sys/kern/uipc_ktls.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/kern/uipc_ktls.c	Sat May  2 22:39:26 2020	(r360569)
@@ -1374,7 +1374,7 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls,
 		m->m_len += pgs->hdr_len + pgs->trail_len;
 
 		/* Populate the TLS header. */
-		tlshdr = (void *)pgs->m_epg_hdr;
+		tlshdr = (void *)m->m_epg_hdr;
 		tlshdr->tls_vmajor = tls->params.tls_vmajor;
 
 		/*
@@ -1387,7 +1387,7 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls,
 			tlshdr->tls_type = TLS_RLTYPE_APP;
 			/* save the real record type for later */
 			pgs->record_type = record_type;
-			pgs->m_epg_trail[0] = record_type;
+			m->m_epg_trail[0] = record_type;
 		} else {
 			tlshdr->tls_vminor = tls->params.tls_vminor;
 			tlshdr->tls_type = record_type;
@@ -1552,7 +1552,7 @@ ktls_encrypt(struct mbuf_ext_pgs *pgs)
 			len = mbuf_ext_pg_len(pgs, i, off);
 			src_iov[i].iov_len = len;
 			src_iov[i].iov_base =
-			    (char *)(void *)PHYS_TO_DMAP(pgs->m_epg_pa[i]) +
+			    (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]) +
 				off;
 
 			if (is_anon) {
@@ -1576,8 +1576,8 @@ retry_page:
 		npages += i;
 
 		error = (*tls->sw_encrypt)(tls,
-		    (const struct tls_record_layer *)pgs->m_epg_hdr,
-		    pgs->m_epg_trail, src_iov, dst_iov, i, pgs->seqno,
+		    (const struct tls_record_layer *)m->m_epg_hdr,
+		    m->m_epg_trail, src_iov, dst_iov, i, pgs->seqno,
 		    pgs->record_type);
 		if (error) {
 			counter_u64_add(ktls_offload_failed_crypto, 1);
@@ -1595,7 +1595,7 @@ retry_page:
 
 			/* Replace them with the new pages. */
 			for (i = 0; i < pgs->npgs; i++)
-				pgs->m_epg_pa[i] = parray[i];
+				m->m_epg_pa[i] = parray[i];
 
 			/* Use the basic free routine. */
 			m->m_ext.ext_free = mb_free_mext_pgs;

Modified: head/sys/kern/uipc_mbuf.c
==============================================================================
--- head/sys/kern/uipc_mbuf.c	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/kern/uipc_mbuf.c	Sat May  2 22:39:26 2020	(r360569)
@@ -163,11 +163,11 @@ CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);
 #if defined(__LP64__)
 CTASSERT(offsetof(struct mbuf, m_dat) == 32);
 CTASSERT(sizeof(struct pkthdr) == 56);
-CTASSERT(sizeof(struct m_ext) == 168);
+CTASSERT(sizeof(struct m_ext) == 160);
 #else
 CTASSERT(offsetof(struct mbuf, m_dat) == 24);
 CTASSERT(sizeof(struct pkthdr) == 48);
-CTASSERT(sizeof(struct m_ext) == 184);
+CTASSERT(sizeof(struct m_ext) == 180);
 #endif
 
 /*
@@ -195,19 +195,30 @@ mb_dupcl(struct mbuf *n, struct mbuf *m)
 	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));
 
 	/*
-	 * Cache access optimization.  For most kinds of external
-	 * storage we don't need full copy of m_ext, since the
-	 * holder of the 'ext_count' is responsible to carry the
-	 * free routine and its arguments.  Exclusion is EXT_EXTREF,
-	 * where 'ext_cnt' doesn't point into mbuf at all.
+	 * Cache access optimization.
+	 *
+	 * o Regular M_EXT storage doesn't need full copy of m_ext, since
+	 *   the holder of the 'ext_count' is responsible to carry the free
+	 *   routine and its arguments.
+	 * o EXT_PGS data is split between main part of mbuf and m_ext, the
+	 *   main part is copied in full, the m_ext part is similar to M_EXT.
+	 * o EXT_EXTREF, where 'ext_cnt' doesn't point into mbuf at all, is
+	 *   special - it needs full copy of m_ext into each mbuf, since any
+	 *   copy could end up as the last to free.
 	 */
-	if (m->m_ext.ext_type == EXT_EXTREF)
-		bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
-	else if (m->m_ext.ext_type == EXT_PGS)
+	switch (m->m_ext.ext_type) {
+	case EXT_PGS:
+		bcopy(&m->m_ext, &n->m_ext, m_epg_copylen);
 		bcopy(&m->m_ext_pgs, &n->m_ext_pgs,
 		    sizeof(struct mbuf_ext_pgs));
-	else
+		break;
+	case EXT_EXTREF:
+		bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
+		break;
+	default:
 		bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);
+	}
+
 	n->m_flags |= M_EXT;
 	n->m_flags |= m->m_flags & (M_RDONLY | M_NOMAP);
 
@@ -1623,7 +1634,7 @@ mb_free_mext_pgs(struct mbuf *m)
 	MBUF_EXT_PGS_ASSERT(m);
 	ext_pgs = &m->m_ext_pgs;
 	for (int i = 0; i < ext_pgs->npgs; i++) {
-		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
+		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
 		vm_page_unwire_noq(pg);
 		vm_page_free(pg);
 	}
@@ -1681,11 +1692,11 @@ retry_page:
 				}
 			}
 			pg_array[i]->flags &= ~PG_ZERO;
-			pgs->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
+			mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
 			pgs->npgs++;
 		}
 		pgs->last_pg_len = length - PAGE_SIZE * (pgs->npgs - 1);
-		MBUF_EXT_PGS_ASSERT_SANITY(pgs);
+		MBUF_EXT_PGS_ASSERT_SANITY(mb);
 		total -= length;
 		error = uiomove_fromphys(pg_array, 0, length, uio);
 		if (error != 0)
@@ -1788,7 +1799,8 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struc
 			seglen = min(seglen, len);
 			off = 0;
 			len -= seglen;
-			error = uiomove(&ext_pgs->m_epg_hdr[segoff], seglen, uio);
+			error = uiomove(__DECONST(void *,
+			    &m->m_epg_hdr[segoff]), seglen, uio);
 		}
 	}
 	pgoff = ext_pgs->first_pg_off;
@@ -1804,7 +1816,7 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struc
 		off = 0;
 		seglen = min(seglen, len);
 		len -= seglen;
-		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
+		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
 		error = uiomove_fromphys(&pg, segoff, seglen, uio);
 		pgoff = 0;
 	};
@@ -1812,7 +1824,8 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struc
 		KASSERT((off + len) <= ext_pgs->trail_len,
 		    ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
 		    ext_pgs->trail_len, m_off));
-		error = uiomove(&ext_pgs->m_epg_trail[off], len, uio);
+		error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
+		    len, uio);
 	}
 	return (error);
 }

Modified: head/sys/sys/mbuf.h
==============================================================================
--- head/sys/sys/mbuf.h	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/sys/mbuf.h	Sat May  2 22:39:26 2020	(r360569)
@@ -231,13 +231,6 @@ struct pkthdr {
 
 #define MBUF_PEXT_FLAG_ANON	1	/* Data can be encrypted in place. */
 
-
-struct mbuf_ext_pgs_data {
-	vm_paddr_t	pa[MBUF_PEXT_MAX_PGS];		/* phys addrs of pgs */
-	char		trail[MBUF_PEXT_TRAIL_LEN]; 	/* TLS trailer */
-	char		hdr[MBUF_PEXT_HDR_LEN];		/* TLS header */
-};
-
 struct ktls_session;
 struct socket;
 
@@ -266,49 +259,49 @@ struct m_ext {
 	uint32_t	 ext_size;	/* size of buffer, for ext_free */
 	uint32_t	 ext_type:8,	/* type of external storage */
 			 ext_flags:24;	/* external storage mbuf flags */
-	char		*ext_buf;	/* start of buffer */
-	/*
-	 * Fields below store the free context for the external storage.
-	 * They are valid only in the refcount carrying mbuf, the one with
-	 * EXT_FLAG_EMBREF flag, with exclusion for EXT_EXTREF type, where
-	 * the free context is copied into all mbufs that use same external
-	 * storage.
-	 */
-#define	m_ext_copylen	offsetof(struct m_ext, ext_free)
-	m_ext_free_t	*ext_free;	/* free routine if not the usual */
-	void		*ext_arg1;	/* optional argument pointer */
 	union {
-		void		*ext_arg2;	/* optional argument pointer */
-		struct mbuf_ext_pgs_data ext_pgs;
+		struct {
+			/*
+			 * Regular M_EXT mbuf:
+			 * o ext_buf always points to the external buffer.
+			 * o ext_free (below) and two optional arguments
+			 *   ext_arg1 and ext_arg2 store the free context for
+			 *   the external storage.  They are set only in the
+			 *   refcount carrying mbuf, the one with
+			 *   EXT_FLAG_EMBREF flag, with exclusion for
+			 *   EXT_EXTREF type, where the free context is copied
+			 *   into all mbufs that use same external storage.
+			 */
+			char 	*ext_buf;	/* start of buffer */
+#define	m_ext_copylen	offsetof(struct m_ext, ext_arg2)
+			void	*ext_arg2;
+		};
+		struct {
+			/*
+			 * Multi-page M_EXTPG mbuf:
+			 * o extpg_pa - page vector.
+			 * o extpg_trail and extpg_hdr - TLS trailer and
+			 *   header.
+			 * Uses ext_free and may also use ext_arg1.
+			 */
+			vm_paddr_t	extpg_pa[MBUF_PEXT_MAX_PGS];
+			char		extpg_trail[MBUF_PEXT_TRAIL_LEN];
+			char		extpg_hdr[MBUF_PEXT_HDR_LEN];
+			/* Pretend these 3 fields are part of mbuf itself. */
+#define	m_epg_pa	m_ext.extpg_pa
+#define	m_epg_trail	m_ext.extpg_trail
+#define	m_epg_hdr	m_ext.extpg_hdr
+#define	m_epg_copylen	offsetof(struct m_ext, ext_free)
+		};
 	};
+	/*
+	 * Free method and optional argument pointer, both
+	 * used by M_EXT and M_EXTPG.
+	 */
+	m_ext_free_t	*ext_free;
+	void		*ext_arg1;
 };
 
-struct mbuf_ext_pgs {
-	uint8_t		npgs;			/* Number of attached pages */
-	uint8_t		nrdy;			/* Pages with I/O pending */
-	uint8_t		hdr_len;		/* TLS header length */
-	uint8_t		trail_len;		/* TLS trailer length */
-	uint16_t	first_pg_off;		/* Offset into 1st page */
-	uint16_t	last_pg_len;		/* Length of last page */
-	uint8_t		flags;			/* Flags */
-	uint8_t		record_type;
-	uint8_t		spare[2];
-	int		enc_cnt;
-	struct ktls_session *tls;		/* TLS session */
-	struct socket	*so;
-	uint64_t	seqno;
-	struct mbuf	*mbuf;
-	STAILQ_ENTRY(mbuf_ext_pgs) stailq;
-#if !defined(__LP64__)
-	uint8_t		pad[8];		/* pad to size of pkthdr */
-#endif
-	struct m_ext	m_ext;
-};
-
-#define m_epg_hdr	m_ext.ext_pgs.hdr
-#define m_epg_trail	m_ext.ext_pgs.trail
-#define m_epg_pa	m_ext.ext_pgs.pa
-
 /*
  * The core of the mbuf object along with some shortcut defines for practical
  * purposes.
@@ -347,15 +340,48 @@ struct mbuf {
 	 * order to support future work on variable-size mbufs.
 	 */
 	union {
-		union {
-			struct {
-				struct pkthdr	m_pkthdr; /* M_PKTHDR set */
-				union {
-					struct m_ext	m_ext;	/* M_EXT set */
-					char		m_pktdat[0];
-				};
+		struct {
+			union {
+				/* M_PKTHDR set. */
+				struct pkthdr	m_pkthdr;
+
+				/* M_EXTPG set.
+				 * Multi-page M_EXTPG mbuf has its meta data
+				 * split between the mbuf_ext_pgs structure
+				 * and m_ext.  It carries vector of pages,
+				 * optional header and trailer char vectors
+				 * and pointers to socket/TLS data.
+				 */
+				struct mbuf_ext_pgs {
+					/* Overall count of pages and count of
+					 * pages with I/O pending. */
+					uint8_t	npgs;
+					uint8_t	nrdy;
+					/* TLS header and trailer lengths.
+					 * The data itself resides in m_ext. */
+					uint8_t	hdr_len;
+					uint8_t	trail_len;
+					/* Offset into 1st page and lenght of
+					 * data in the last page. */
+					uint16_t first_pg_off;
+					uint16_t last_pg_len;
+					uint8_t	flags;
+					uint8_t	record_type;
+					uint8_t	spare[2];
+					int	enc_cnt;
+					struct ktls_session *tls;
+					struct socket	*so;
+					uint64_t	seqno;
+					struct mbuf	*mbuf;
+					STAILQ_ENTRY(mbuf_ext_pgs) stailq;
+				} m_ext_pgs;
 			};
-			struct mbuf_ext_pgs m_ext_pgs;
+			union {
+				/* M_EXT or M_EXTPG set. */
+				struct m_ext	m_ext;
+				/* M_PKTHDR set, neither M_EXT nor M_EXTPG. */
+				char		m_pktdat[0];
+			};
 		};
 		char	m_dat[0];			/* !M_PKTHDR, !M_EXT */
 	};
@@ -375,12 +401,12 @@ mbuf_ext_pg_len(struct mbuf_ext_pgs *ext_pgs, int pidx
 }
 
 #ifdef INVARIANT_SUPPORT
-void	mb_ext_pgs_check(struct mbuf_ext_pgs *ext_pgs);
+void	mb_ext_pgs_check(struct mbuf *m);
 #endif
 #ifdef INVARIANTS
-#define	MBUF_EXT_PGS_ASSERT_SANITY(ext_pgs)	mb_ext_pgs_check((ext_pgs))
+#define	MBUF_EXT_PGS_ASSERT_SANITY(m)	mb_ext_pgs_check((m))
 #else
-#define	MBUF_EXT_PGS_ASSERT_SANITY(ext_pgs)
+#define	MBUF_EXT_PGS_ASSERT_SANITY(m)
 #endif
 #endif
 

Modified: head/sys/sys/sglist.h
==============================================================================
--- head/sys/sys/sglist.h	Sat May  2 20:47:58 2020	(r360568)
+++ head/sys/sys/sglist.h	Sat May  2 22:39:26 2020	(r360569)
@@ -57,7 +57,6 @@ struct sglist {
 
 struct bio;
 struct mbuf;
-struct mbuf_ext_pgs;
 struct uio;
 
 static __inline void
@@ -88,8 +87,8 @@ sglist_hold(struct sglist *sg)
 struct sglist *sglist_alloc(int nsegs, int mflags);
 int	sglist_append(struct sglist *sg, void *buf, size_t len);
 int	sglist_append_bio(struct sglist *sg, struct bio *bp);
-int	sglist_append_ext_pgs(struct sglist *sg, struct mbuf_ext_pgs *ext_pgs,
-	    size_t off, size_t len);
+int	sglist_append_ext_pgs(struct sglist *sg, struct mbuf *m, size_t off,
+	    size_t len);
 int	sglist_append_mb_ext_pgs(struct sglist *sg, struct mbuf *m);
 int	sglist_append_mbuf(struct sglist *sg, struct mbuf *m0);
 int	sglist_append_phys(struct sglist *sg, vm_paddr_t paddr,
@@ -105,8 +104,7 @@ struct sglist *sglist_build(void *buf, size_t len, int
 struct sglist *sglist_clone(struct sglist *sg, int mflags);
 int	sglist_consume_uio(struct sglist *sg, struct uio *uio, size_t resid);
 int	sglist_count(void *buf, size_t len);
-int	sglist_count_ext_pgs(struct mbuf_ext_pgs *ext_pgs, size_t off,
-	    size_t len);
+int	sglist_count_ext_pgs(struct mbuf *m, size_t off, size_t len);
 int	sglist_count_mb_ext_pgs(struct mbuf *m);
 int	sglist_count_vmpages(vm_page_t *m, size_t pgoff, size_t len);
 void	sglist_free(struct sglist *sg);


More information about the svn-src-head mailing list