svn commit: r256289 - user/andre/mbuf_staging/kern
Andre Oppermann
andre at FreeBSD.org
Thu Oct 10 19:24:12 UTC 2013
Author: andre
Date: Thu Oct 10 19:24:11 2013
New Revision: 256289
URL: http://svnweb.freebsd.org/changeset/base/256289
Log:
Move all remaining mbuf allocation functions under one umbrella into
kern/kern_mbuf.c from kern/uipc_mbuf.c.
Modified:
user/andre/mbuf_staging/kern/kern_mbuf.c
user/andre/mbuf_staging/kern/uipc_mbuf.c
Modified: user/andre/mbuf_staging/kern/kern_mbuf.c
==============================================================================
--- user/andre/mbuf_staging/kern/kern_mbuf.c Thu Oct 10 19:10:21 2013 (r256288)
+++ user/andre/mbuf_staging/kern/kern_mbuf.c Thu Oct 10 19:24:11 2013 (r256289)
@@ -772,6 +772,36 @@ m_getcl(int how, short type, int flags)
return (uma_zalloc_arg(zone_pack, &args, how));
}
+/*
+ * m_getjcl() returns an mbuf with a cluster of the specified size attached.
+ * The size may be MCLBYTES, MJUMPAGESIZE, MJUM9BYTES or MJUM16BYTES.
+ */
+struct mbuf *
+m_getjcl(int how, short type, int flags, int size)
+{
+ struct mb_args args;
+ struct mbuf *m, *n;
+ uma_zone_t zone;
+
+ if (size == MCLBYTES)
+ return m_getcl(how, type, flags);
+
+ args.flags = flags;
+ args.type = type;
+
+ m = uma_zalloc_arg(zone_mbuf, &args, how);
+ if (m == NULL)
+ return (NULL);
+
+ zone = m_getzone(size);
+ n = uma_zalloc_arg(zone, m, how);
+ if (n == NULL) {
+ uma_zfree(zone_mbuf, m);
+ return (NULL);
+ }
+ return (m);
+}
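
/*
 * Hypothetical caller of m_getjcl(), sketched for illustration only and
 * not part of the change above; assumes the usual <sys/param.h>,
 * <sys/systm.h> and <sys/mbuf.h> includes.  It allocates a packet-header
 * mbuf backed by a 9k jumbo cluster, e.g. to refill a receive ring.
 */
static struct mbuf *
example_alloc_jumbo9(void)
{
	struct mbuf *m;

	/* M_NOWAIT may fail under memory pressure; the caller must check. */
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	return (m);
}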
+
void
m_clget(struct mbuf *m, int how)
{
@@ -852,6 +882,262 @@ m_cljset(struct mbuf *m, void *cl, int t
}
/*
+ * m_get2() allocates a minimum-sized mbuf that will fit the "size" argument.
+ */
+struct mbuf *
+m_get2(int size, int how, short type, int flags)
+{
+ struct mb_args args;
+ struct mbuf *m, *n;
+
+ args.flags = flags;
+ args.type = type;
+
+ if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
+ return (uma_zalloc_arg(zone_mbuf, &args, how));
+ if (size <= MCLBYTES)
+ return (uma_zalloc_arg(zone_pack, &args, how));
+
+ if (size > MJUMPAGESIZE)
+ return (NULL);
+
+ m = uma_zalloc_arg(zone_mbuf, &args, how);
+ if (m == NULL)
+ return (NULL);
+
+ n = uma_zalloc_arg(zone_jumbop, m, how);
+ if (n == NULL) {
+ uma_zfree(zone_mbuf, m);
+ return (NULL);
+ }
+
+ return (m);
+}
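
/*
 * Hypothetical caller of m_get2(), sketched for illustration only and
 * not part of the change above: copy "len" bytes into the smallest
 * mbuf/cluster combination that fits.  m_get2() returns NULL when len
 * exceeds MJUMPAGESIZE, so larger requests need m_getjcl() or a chain.
 */
static struct mbuf *
example_copy_to_mbuf(const void *src, int len)
{
	struct mbuf *m;

	m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);
	bcopy(src, mtod(m, void *), len);
	m->m_len = m->m_pkthdr.len = len;
	return (m);
}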
+
+/*
+ * Allocate a given length's worth of mbufs and/or clusters (whatever fits
+ * best) and return a pointer to the top of the allocated chain. If an
+ * existing mbuf chain is provided, then we will append the new chain
+ * to the existing one but still return the top of the newly allocated
+ * chain.
+ */
+struct mbuf *
+m_getm2(struct mbuf *m, int len, int how, short type, int flags)
+{
+ struct mbuf *mb, *nm = NULL, *mtail = NULL;
+
+ KASSERT(len >= 0, ("%s: len is < 0", __func__));
+
+ /* Validate flags. */
+ flags &= (M_PKTHDR | M_EOR);
+
+ /* Packet header mbuf must be first in chain. */
+ if ((flags & M_PKTHDR) && m != NULL)
+ flags &= ~M_PKTHDR;
+
+ /* Loop and append maximum sized mbufs to the chain tail. */
+ while (len > 0) {
+ if (len > MCLBYTES)
+ mb = m_getjcl(how, type, (flags & M_PKTHDR),
+ MJUMPAGESIZE);
+ else if (len >= MINCLSIZE)
+ mb = m_getcl(how, type, (flags & M_PKTHDR));
+ else if (flags & M_PKTHDR)
+ mb = m_gethdr(how, type);
+ else
+ mb = m_get(how, type);
+
+ /* Fail the whole operation if one mbuf can't be allocated. */
+ if (mb == NULL) {
+ if (nm != NULL)
+ m_freem(nm);
+ return (NULL);
+ }
+
+ /* Bookkeeping. */
+ len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
+ ((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
+ if (mtail != NULL)
+ mtail->m_next = mb;
+ else
+ nm = mb;
+ mtail = mb;
+ flags &= ~M_PKTHDR; /* Only valid on the first mbuf. */
+ }
+ if (flags & M_EOR)
+ mtail->m_flags |= M_EOR; /* Only valid on the last mbuf. */
+
+ /* If mbuf was supplied, append new chain to the end of it. */
+ if (m != NULL) {
+ for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
+ ;
+ mtail->m_next = nm;
+ mtail->m_flags &= ~M_EOR;
+ } else
+ m = nm;
+
+ return (m);
+}
+
+/*
+ * Free an entire chain of mbufs and associated external buffers, if
+ * applicable.
+ */
+void
+m_freem(struct mbuf *mb)
+{
+
+ while (mb != NULL)
+ mb = m_free(mb);
+}
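
/*
 * Hypothetical caller of m_getm2() and m_freem(), sketched for
 * illustration only and not part of the change above: allocate a chain
 * large enough for "len" bytes without sleeping, then release it.
 */
static void
example_chain_roundtrip(int len)
{
	struct mbuf *chain;

	/* NULL first argument: start a new chain instead of appending. */
	chain = m_getm2(NULL, len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (chain == NULL)
		return;
	/* ... fill the chain ... then free every mbuf and cluster in it. */
	m_freem(chain);
}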
+
+/*-
+ * Configure a provided mbuf to refer to the provided external storage
+ * buffer and set up a reference count for said buffer.  If the setting
+ * up of the reference count fails, the M_EXT bit will not be set.  If
+ * successful, the M_EXT bit is set in the mbuf's flags.
+ *
+ * Arguments:
+ *    mb     The existing mbuf to which to attach the provided buffer.
+ *    buf    The address of the provided external storage buffer.
+ *    size   The size of the provided buffer.
+ *    freef  A pointer to a routine that is responsible for freeing the
+ *           provided external storage buffer.
+ *    arg1, arg2
+ *           Optional pointers (may be NULL) passed to the provided
+ *           freef routine.
+ *    flags  Any other flags to be passed to the provided mbuf.
+ *    type   The type that the external storage buffer should be
+ *           labeled with.
+ *    wait   M_WAITOK or M_NOWAIT for the reference count allocation.
+ *
+ * Returns:
+ *    0 on success, or ENOMEM if the reference count could not be
+ *    allocated.
+ */
+int
+m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
+ int (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2,
+ int flags, int type, int wait)
+{
+ KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
+
+ if (type != EXT_EXTREF)
+ mb->m_ext.ref_cnt = uma_zalloc(zone_ext_refcnt, wait);
+
+ if (mb->m_ext.ref_cnt == NULL)
+ return (ENOMEM);
+
+ *(mb->m_ext.ref_cnt) = 1;
+ mb->m_flags |= (M_EXT | flags);
+ mb->m_ext.ext_buf = buf;
+ mb->m_data = mb->m_ext.ext_buf;
+ mb->m_ext.ext_size = size;
+ mb->m_ext.ext_free = freef;
+ mb->m_ext.ext_arg1 = arg1;
+ mb->m_ext.ext_arg2 = arg2;
+ mb->m_ext.ext_type = type;
+ mb->m_ext.ext_flags = 0;
+
+ return (0);
+}
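
/*
 * Hypothetical caller of m_extadd(), sketched for illustration only and
 * not part of the change above; assumes <sys/param.h>, <sys/systm.h>,
 * <sys/malloc.h> and <sys/mbuf.h>.  It attaches a malloc(9)'d driver
 * buffer (M_DEVBUF) to a fresh mbuf.  EXT_NET_DRV as the type, the
 * buffer and the free routine are assumptions made for the example; the
 * return convention of the ext_free routine is not shown in this diff,
 * so 0 is assumed.
 */
static int
example_ext_free(struct mbuf *m, void *arg1, void *arg2)
{

	free(arg1, M_DEVBUF);	/* arg1 carries the buffer itself here. */
	return (0);		/* Assumed success value. */
}

static struct mbuf *
example_attach_buf(void *buf, u_int size)
{
	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if (m_extadd(m, (caddr_t)buf, size, example_ext_free, buf, NULL,
	    0, EXT_NET_DRV, M_NOWAIT) != 0) {
		m_free(m);	/* No M_EXT was attached. */
		return (NULL);
	}
	return (m);
}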
+
+/*
+ * Associate an external reference-counted buffer with an mbuf.
+ */
+void
+m_extaddref(struct mbuf *m, caddr_t buf, u_int size, u_int *ref_cnt,
+ int (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2)
+{
+
+ KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__));
+
+ atomic_add_int(ref_cnt, 1);
+ m->m_flags |= M_EXT;
+ m->m_ext.ext_buf = buf;
+ m->m_ext.ref_cnt = ref_cnt;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_ext.ext_size = size;
+ m->m_ext.ext_free = freef;
+ m->m_ext.ext_arg1 = arg1;
+ m->m_ext.ext_arg2 = arg2;
+ m->m_ext.ext_type = EXT_EXTREF;
+}
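
/*
 * Hypothetical caller of m_extaddref(), sketched for illustration only
 * and not part of the change above: share one externally
 * reference-counted buffer between two mbufs.  Buffer, counter and free
 * routine are placeholders; with the counter starting at zero, freef
 * runs once the last mbuf referencing the buffer is freed (see
 * mb_free_ext() below).
 */
static void
example_share_buf(struct mbuf *m1, struct mbuf *m2, caddr_t buf,
    u_int size, u_int *cnt, int (*freef)(struct mbuf *, void *, void *))
{

	m_extaddref(m1, buf, size, cnt, freef, NULL, NULL);
	m_extaddref(m2, buf, size, cnt, freef, NULL, NULL);
}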
+
+/*
+ * Non-directly-exported function to clean up after mbufs with M_EXT
+ * storage attached to them, freeing the external storage once the
+ * last reference to it is dropped.
+ */
+void
+mb_free_ext(struct mbuf *m)
+{
+ int skipmbuf;
+
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
+
+ /*
+ * Check whether the mbuf header is embedded in the external storage
+ * (M_NOFREE set) and therefore must not be freed separately.
+ */
+ skipmbuf = (m->m_flags & M_NOFREE);
+
+ /* Free attached storage if this mbuf is the only reference to it. */
+ if (*(m->m_ext.ref_cnt) == 1 ||
+ atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
+ switch (m->m_ext.ext_type) {
+ case EXT_PACKET: /* The packet zone is special. */
+ if (*(m->m_ext.ref_cnt) == 0)
+ *(m->m_ext.ref_cnt) = 1;
+ uma_zfree(zone_pack, m);
+ return; /* Job done. */
+ case EXT_CLUSTER:
+ uma_zfree(zone_clust, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBOP:
+ uma_zfree(zone_jumbop, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBO9:
+ uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBO16:
+ uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
+ break;
+ case EXT_SFBUF:
+ case EXT_NET_DRV:
+ case EXT_MOD_TYPE:
+ case EXT_DISPOSABLE:
+ *(m->m_ext.ref_cnt) = 0;
+ uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
+ m->m_ext.ref_cnt));
+ /* FALLTHROUGH */
+ case EXT_EXTREF:
+ KASSERT(m->m_ext.ext_free != NULL,
+ ("%s: ext_free not set", __func__));
+ (void)(*(m->m_ext.ext_free))(m, m->m_ext.ext_arg1,
+ m->m_ext.ext_arg2);
+ break;
+ default:
+ KASSERT(m->m_ext.ext_type == 0,
+ ("%s: unknown ext_type", __func__));
+ }
+ }
+ if (skipmbuf)
+ return;
+
+ /*
+ * Free this mbuf back to the mbuf zone with all m_ext
+ * information purged.
+ */
+ m->m_ext.ext_buf = NULL;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_arg1 = NULL;
+ m->m_ext.ext_arg2 = NULL;
+ m->m_ext.ref_cnt = NULL;
+ m->m_ext.ext_size = 0;
+ m->m_ext.ext_type = 0;
+ m->m_ext.ext_flags = 0;
+ m->m_flags &= ~M_EXT;
+ uma_zfree(zone_mbuf, m);
+}
+
+/*
* This is the protocol drain routine.
*
* No locks should be held when this is called. The drain routines have to
Modified: user/andre/mbuf_staging/kern/uipc_mbuf.c
==============================================================================
--- user/andre/mbuf_staging/kern/uipc_mbuf.c Thu Oct 10 19:10:21 2013 (r256288)
+++ user/andre/mbuf_staging/kern/uipc_mbuf.c Thu Oct 10 19:24:11 2013 (r256289)
@@ -93,292 +93,6 @@ CTASSERT(MSIZE - offsetof(struct mbuf, m
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);
/*
- * m_get2() allocates a minimum-sized mbuf that will fit the "size" argument.
- */
-struct mbuf *
-m_get2(int size, int how, short type, int flags)
-{
- struct mb_args args;
- struct mbuf *m, *n;
-
- args.flags = flags;
- args.type = type;
-
- if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
- return (uma_zalloc_arg(zone_mbuf, &args, how));
- if (size <= MCLBYTES)
- return (uma_zalloc_arg(zone_pack, &args, how));
-
- if (size > MJUMPAGESIZE)
- return (NULL);
-
- m = uma_zalloc_arg(zone_mbuf, &args, how);
- if (m == NULL)
- return (NULL);
-
- n = uma_zalloc_arg(zone_jumbop, m, how);
- if (n == NULL) {
- uma_zfree(zone_mbuf, m);
- return (NULL);
- }
-
- return (m);
-}
-
-/*
- * m_getjcl() returns an mbuf with a cluster of the specified size attached.
- * The size may be MCLBYTES, MJUMPAGESIZE, MJUM9BYTES or MJUM16BYTES.
- */
-struct mbuf *
-m_getjcl(int how, short type, int flags, int size)
-{
- struct mb_args args;
- struct mbuf *m, *n;
- uma_zone_t zone;
-
- if (size == MCLBYTES)
- return m_getcl(how, type, flags);
-
- args.flags = flags;
- args.type = type;
-
- m = uma_zalloc_arg(zone_mbuf, &args, how);
- if (m == NULL)
- return (NULL);
-
- zone = m_getzone(size);
- n = uma_zalloc_arg(zone, m, how);
- if (n == NULL) {
- uma_zfree(zone_mbuf, m);
- return (NULL);
- }
- return (m);
-}
-
-/*
- * Allocate a given length's worth of mbufs and/or clusters (whatever fits
- * best) and return a pointer to the top of the allocated chain. If an
- * existing mbuf chain is provided, then we will append the new chain
- * to the existing one but still return the top of the newly allocated
- * chain.
- */
-struct mbuf *
-m_getm2(struct mbuf *m, int len, int how, short type, int flags)
-{
- struct mbuf *mb, *nm = NULL, *mtail = NULL;
-
- KASSERT(len >= 0, ("%s: len is < 0", __func__));
-
- /* Validate flags. */
- flags &= (M_PKTHDR | M_EOR);
-
- /* Packet header mbuf must be first in chain. */
- if ((flags & M_PKTHDR) && m != NULL)
- flags &= ~M_PKTHDR;
-
- /* Loop and append maximum sized mbufs to the chain tail. */
- while (len > 0) {
- if (len > MCLBYTES)
- mb = m_getjcl(how, type, (flags & M_PKTHDR),
- MJUMPAGESIZE);
- else if (len >= MINCLSIZE)
- mb = m_getcl(how, type, (flags & M_PKTHDR));
- else if (flags & M_PKTHDR)
- mb = m_gethdr(how, type);
- else
- mb = m_get(how, type);
-
- /* Fail the whole operation if one mbuf can't be allocated. */
- if (mb == NULL) {
- if (nm != NULL)
- m_freem(nm);
- return (NULL);
- }
-
- /* Bookkeeping. */
- len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
- ((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
- if (mtail != NULL)
- mtail->m_next = mb;
- else
- nm = mb;
- mtail = mb;
- flags &= ~M_PKTHDR; /* Only valid on the first mbuf. */
- }
- if (flags & M_EOR)
- mtail->m_flags |= M_EOR; /* Only valid on the last mbuf. */
-
- /* If mbuf was supplied, append new chain to the end of it. */
- if (m != NULL) {
- for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
- ;
- mtail->m_next = nm;
- mtail->m_flags &= ~M_EOR;
- } else
- m = nm;
-
- return (m);
-}
-
-/*
- * Free an entire chain of mbufs and associated external buffers, if
- * applicable.
- */
-void
-m_freem(struct mbuf *mb)
-{
-
- while (mb != NULL)
- mb = m_free(mb);
-}
-
-/*-
- * Configure a provided mbuf to refer to the provided external storage
- * buffer and set up a reference count for said buffer.  If the setting
- * up of the reference count fails, the M_EXT bit will not be set.  If
- * successful, the M_EXT bit is set in the mbuf's flags.
- *
- * Arguments:
- *    mb     The existing mbuf to which to attach the provided buffer.
- *    buf    The address of the provided external storage buffer.
- *    size   The size of the provided buffer.
- *    freef  A pointer to a routine that is responsible for freeing the
- *           provided external storage buffer.
- *    arg1, arg2
- *           Optional pointers (may be NULL) passed to the provided
- *           freef routine.
- *    flags  Any other flags to be passed to the provided mbuf.
- *    type   The type that the external storage buffer should be
- *           labeled with.
- *    wait   M_WAITOK or M_NOWAIT for the reference count allocation.
- *
- * Returns:
- *    0 on success, or ENOMEM if the reference count could not be
- *    allocated.
- */
-int
-m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
- int (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2,
- int flags, int type, int wait)
-{
- KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
-
- if (type != EXT_EXTREF)
- mb->m_ext.ref_cnt = uma_zalloc(zone_ext_refcnt, wait);
-
- if (mb->m_ext.ref_cnt == NULL)
- return (ENOMEM);
-
- *(mb->m_ext.ref_cnt) = 1;
- mb->m_flags |= (M_EXT | flags);
- mb->m_ext.ext_buf = buf;
- mb->m_data = mb->m_ext.ext_buf;
- mb->m_ext.ext_size = size;
- mb->m_ext.ext_free = freef;
- mb->m_ext.ext_arg1 = arg1;
- mb->m_ext.ext_arg2 = arg2;
- mb->m_ext.ext_type = type;
- mb->m_ext.ext_flags = 0;
-
- return (0);
-}
-
-/*
- * Associate an external reference-counted buffer with an mbuf.
- */
-void
-m_extaddref(struct mbuf *m, caddr_t buf, u_int size, u_int *ref_cnt,
- int (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2)
-{
-
- KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__));
-
- atomic_add_int(ref_cnt, 1);
- m->m_flags |= M_EXT;
- m->m_ext.ext_buf = buf;
- m->m_ext.ref_cnt = ref_cnt;
- m->m_data = m->m_ext.ext_buf;
- m->m_ext.ext_size = size;
- m->m_ext.ext_free = freef;
- m->m_ext.ext_arg1 = arg1;
- m->m_ext.ext_arg2 = arg2;
- m->m_ext.ext_type = EXT_EXTREF;
-}
-
-/*
- * Non-directly-exported function to clean up after mbufs with M_EXT
- * storage attached to them, freeing the external storage once the
- * last reference to it is dropped.
- */
-void
-mb_free_ext(struct mbuf *m)
-{
- int skipmbuf;
-
- KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
- KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
-
- /*
- * Check whether the mbuf header is embedded in the external storage
- * (M_NOFREE set) and therefore must not be freed separately.
- */
- skipmbuf = (m->m_flags & M_NOFREE);
-
- /* Free attached storage if this mbuf is the only reference to it. */
- if (*(m->m_ext.ref_cnt) == 1 ||
- atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
- switch (m->m_ext.ext_type) {
- case EXT_PACKET: /* The packet zone is special. */
- if (*(m->m_ext.ref_cnt) == 0)
- *(m->m_ext.ref_cnt) = 1;
- uma_zfree(zone_pack, m);
- return; /* Job done. */
- case EXT_CLUSTER:
- uma_zfree(zone_clust, m->m_ext.ext_buf);
- break;
- case EXT_JUMBOP:
- uma_zfree(zone_jumbop, m->m_ext.ext_buf);
- break;
- case EXT_JUMBO9:
- uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
- break;
- case EXT_JUMBO16:
- uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
- break;
- case EXT_SFBUF:
- case EXT_NET_DRV:
- case EXT_MOD_TYPE:
- case EXT_DISPOSABLE:
- *(m->m_ext.ref_cnt) = 0;
- uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
- m->m_ext.ref_cnt));
- /* FALLTHROUGH */
- case EXT_EXTREF:
- KASSERT(m->m_ext.ext_free != NULL,
- ("%s: ext_free not set", __func__));
- (void)(*(m->m_ext.ext_free))(m, m->m_ext.ext_arg1,
- m->m_ext.ext_arg2);
- break;
- default:
- KASSERT(m->m_ext.ext_type == 0,
- ("%s: unknown ext_type", __func__));
- }
- }
- if (skipmbuf)
- return;
-
- /*
- * Free this mbuf back to the mbuf zone with all m_ext
- * information purged.
- */
- m->m_ext.ext_buf = NULL;
- m->m_ext.ext_free = NULL;
- m->m_ext.ext_arg1 = NULL;
- m->m_ext.ext_arg2 = NULL;
- m->m_ext.ref_cnt = NULL;
- m->m_ext.ext_size = 0;
- m->m_ext.ext_type = 0;
- m->m_ext.ext_flags = 0;
- m->m_flags &= ~M_EXT;
- uma_zfree(zone_mbuf, m);
-}
-
-/*
* Attach the cluster from *m to *n, set up m_ext in *n
* and bump the refcount of the cluster.
*/