svn commit: r187927 - projects/cambria/sys/arm/xscale/ixp425
Sam Leffler
sam at FreeBSD.org
Fri Jan 30 09:44:11 PST 2009
Author: sam
Date: Fri Jan 30 17:44:10 2009
New Revision: 187927
URL: http://svn.freebsd.org/changeset/base/187927
Log:
checkpoint work; still nowhere close
Modified:
projects/cambria/sys/arm/xscale/ixp425/ixp4xx_crypto.c
Modified: projects/cambria/sys/arm/xscale/ixp425/ixp4xx_crypto.c
==============================================================================
--- projects/cambria/sys/arm/xscale/ixp425/ixp4xx_crypto.c Fri Jan 30 17:29:26 2009 (r187926)
+++ projects/cambria/sys/arm/xscale/ixp425/ixp4xx_crypto.c Fri Jan 30 17:44:10 2009 (r187927)
@@ -99,29 +99,10 @@ __FBSDID("$FreeBSD$");
#define MOD_AES192 (0x0900 | 6) /* 192-bit key = 6 words */
#define MOD_AES256 (0x0a00 | 8) /* 256-bit key = 8 words */
-#define CTL_FLAG_UNUSED 0x0000
-#define CTL_FLAG_USED 0x1000
-#define CTL_FLAG_PERFORM_ABLK 0x0001
-#define CTL_FLAG_GEN_ICV 0x0002
-#define CTL_FLAG_GEN_REVAES 0x0004
-#define CTL_FLAG_PERFORM_AEAD 0x0008
-#define CTL_FLAG_MASK 0x000f
-
-#define NPE_MAXSEG 3 /* empirically selected */
-
-struct npehwbuf {
- struct { /* NPE shared area, cacheline aligned */
- uint32_t next; /* phys addr of next segment */
- uint32_t len; /* buffer/segment length (bytes) */
- uint32_t data; /* phys addr of data segment */
- uint32_t pad[5]; /* pad to cacheline */
- } ne[NPE_MAXSEG];
-};
-
-#define NPE_QM_Q_ADDR(e) ((e)&0xffffffff8) /* phys address */
-#define NPE_QM_Q_OK(e) (((e)&1) == 0) /* cmd status */
-
-struct npehwctx { /* h/w crypto context */
+/*
+ * H/W crypto context, one per request sent to the NPE.
+ */
+struct npehwctx {
uint8_t op; /* npe operation */
uint8_t init_len;
uint16_t pad;
@@ -140,8 +121,49 @@ struct npehwctx { /* h/w crypto contex
uint16_t cipher_len; /* cipher data length */
uint32_t aad_addr; /* Additional Auth Data addr for CCM */
uint32_t ctx; /* phys addr of NPE crypto context */
+
+ /* driver extension, not used by NPE */
+ struct cryptop *crp; /* associated crypto operation */
+ struct npehwctx *next; /* free list */
};
+#define NPE_QM_Q_ADDR(e) ((e)&0xfffffff8) /* phys address */
+#define NPE_QM_Q_OK(e) (((e)&1) == 0) /* cmd status */
+
+#define NPE_MAXCTX 64
+#define NPE_MAXCTX_BYTES 64
+
+/*
+ * H/W operand buffers, used to hold in+out parameters
+ * attached to a context (see src, dst above). Scatter/gather
+ * of data is possible but w/ a max # segments (empirically
+ * selected). Fragmented data are copied as needed.
+ */
+struct npehwbuf {
+ struct { /* NPE shared area, cacheline aligned */
+ uint32_t next; /* phys addr of next segment */
+ uint32_t len; /* buffer/segment length (bytes) */
+ uint32_t data; /* phys addr of data segment */
+ uint32_t pad[5]; /* pad to cacheline */
+#define NPE_MAXSEG 3 /* empirically selected */
+ } ne[NPE_MAXSEG];
+};
+
+struct ixpcrypto_softc;
+
+/*
+ * Driver companion to npehwbuf (1-1).
+ */
+struct npebuf {
+ bus_dmamap_t map; /* bus dma map for associated data */
+ struct npehwbuf *hw; /* associated h/w block */
+ uint32_t neaddr; /* phys address of hw->ne */
+ struct ixpcrypto_softc *sc;
+};
+
+/*
+ * Crypto session state.
+ */
struct ixpcrypto_session {
TAILQ_ENTRY(ixpcrypto_session) next;
uint32_t id;
@@ -153,14 +175,6 @@ struct ixpcrypto_session {
uint8_t iv[16];
};
-struct npebuf {
- struct npebuf *next; /* chain to next buffer */
- bus_dmamap_t map; /* bus dma map for associated data */
- struct npehwbuf *hw; /* associated h/w block */
- uint32_t neaddr; /* phys address of hw->ne */
- struct cryptop *crp; /* associated crypto operation */
-};
-
struct ixpcrypto_softc {
device_t dev;
int debug; /* debug msg flags */
@@ -169,14 +183,21 @@ struct ixpcrypto_softc {
struct rwlock sessions_lock; /* lock over session table */
TAILQ_HEAD(ixpcrypto_sessions_head, ixpcrypto_session) sessions;
struct ixpnpe_softc *npe; /* handle on NPE engine */
+
+ struct npehwctx *hwctx; /* NPE h/w crypto ctx */
+ bus_dma_tag_t ctx_tag; /* tag+map for NPE crypto ctx */
+ bus_dmamap_t ctx_map;
+ bus_addr_t ctx_phys; /* phys addr of crypto ctx */
+ struct npehwctx *ctx; /* crypto ctx free list */
+
bus_dma_tag_t dtag; /* bus dma tag for mapped data */
- struct npehwbuf *hwbuf; /* NPE h/w buffers */
- bus_dma_tag_t buf_tag; /* tag+map for NPE cmd buffers */
+ struct npehwbuf *hwbuf; /* NPE h/w operand buffers */
+ bus_dma_tag_t buf_tag; /* tag+map for NPE operand buffers */
bus_dmamap_t buf_map;
bus_addr_t buf_phys; /* phys addr of h/w buffers */
- struct npebuf *buf; /* cmd buffers (1-1 w/ h/w) */
- struct npebuf *free; /* list of free cmd buffers */
- struct mtx mtx; /* lock over cmd buffer list */
+ struct npebuf *buf; /* operand buffers (1-1 w/ h/w) */
+
+ struct mtx mtx; /* lock over operation state */
int cmd_qid; /* qid for submitting cmds */
int cmddone_qid; /* qid cmds return on */
};
@@ -195,10 +216,10 @@ TUNABLE_INT("hw.ixpcrypto.npe", &ixpcryp
if (sc->debug >= n) device_printf(sc->dev, fmt, __VA_ARGS__); \
} while (0)
-static int ixpcrypto_cmdbuf = 64; /* # cmd buffers to allocate */
-SYSCTL_INT(_hw_ixpcrypto, OID_AUTO, cmdbuf, CTLFLAG_RD, &ixpcrypto_cmdbuf,
- 0, "cmd buffers allocated");
-TUNABLE_INT("hw.ixpcrypto.cmdbuf", &ixpcrypto_cmdbuf);
+static int ixpcrypto_qlen = 16; /* NPE cmd queue len */
+SYSCTL_INT(_hw_ixpcrypto, OID_AUTO, qlen, CTLFLAG_RD, &ixpcrypto_qlen,
+ 0, "cmd queue length");
+TUNABLE_INT("hw.ixpcrypto.qlen", &ixpcrypto_qlen);
static int ixpcrypto_dma_setup(struct ixpcrypto_softc *);
static void ixpcrypto_dma_destroy(struct ixpcrypto_softc *);
@@ -251,12 +272,11 @@ ixpcrypto_attach(device_t dev)
}
sc->cmd_qid = 29;
- ixpqmgr_qconfig(sc->cmd_qid, ixpcrypto_cmdbuf, 0,
- ixpcrypto_cmdbuf, 0, NULL, sc);
+ ixpqmgr_qconfig(sc->cmd_qid, 2*ixpcrypto_qlen, 0,
+ 2*ixpcrypto_qlen, 0, NULL, sc);
sc->cmddone_qid = 30;
- KASSERT(ixpcrypto_cmdbuf > 2*4, ("%d cmd buffers", ixpcrypto_cmdbuf));
- ixpqmgr_qconfig(sc->cmddone_qid, ixpcrypto_cmdbuf/4, 0, 2,
+ ixpqmgr_qconfig(sc->cmddone_qid, ixpcrypto_qlen/2, 0, 2,
IX_QMGR_Q_SOURCE_ID_NOT_E, ixpcrypto_cmddone, sc);
sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
@@ -342,6 +362,43 @@ ixpcrypto_dma_setup(struct ixpcrypto_sof
mtx_init(&sc->mtx, "ixpcrypto", NULL, MTX_DEF);
+ /* DMA tag and map for the NPE buffers */
+ error = bus_dma_tag_create(bus_get_dma_tag(device_get_parent(sc->dev)),
+ sizeof(uint32_t), 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ ixpcrypto_qlen * sizeof(struct npehwctx), 1,
+ ixpcrypto_qlen * sizeof(struct npehwctx), 0,
+ NULL, NULL, &sc->ctx_tag);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "unable to create npehwctx dma tag, error %u\n", error);
+ return error;
+ }
+ /* XXX COHERENT for now */
+ error = bus_dmamem_alloc(sc->ctx_tag, (void **)&sc->hwctx,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &sc->ctx_map);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "unable to allocate memory for h/w ctx, error %u\n",
+ error);
+ return error;
+ }
+ /* NB: use npe_getaddr and copy phys address */
+ error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map,
+ sc->hwctx, ixpcrypto_qlen*sizeof(struct npehwctx), npe_getaddr, sc, 0);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "unable to map memory for crypto ctx, error %u\n", error);
+ return error;
+ }
+ sc->ctx_phys = sc->buf_phys;
+
+ sc->ctx = NULL;
+ for (i = 0; i < ixpcrypto_qlen; i++) {
+ struct npehwctx *ctx = &sc->hwctx[i];
+ ctx->next = sc->ctx;
+ sc->ctx = ctx;
+ }
+
/* DMA tag for mapped mbufs */
error = bus_dma_tag_create(bus_get_dma_tag(device_get_parent(sc->dev)),
1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
@@ -353,12 +410,12 @@ ixpcrypto_dma_setup(struct ixpcrypto_sof
return error;
}
- /* DMA tag and map for the NPE buffers */
+ /* DMA tag and map for the operand buffers */
error = bus_dma_tag_create(bus_get_dma_tag(device_get_parent(sc->dev)),
sizeof(uint32_t), 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
NULL, NULL,
- ixpcrypto_cmdbuf * sizeof(struct npehwbuf), 1,
- ixpcrypto_cmdbuf * sizeof(struct npehwbuf), 0,
+ 2*ixpcrypto_qlen * sizeof(struct npehwbuf), 1,
+ 2*ixpcrypto_qlen * sizeof(struct npehwbuf), 0,
NULL, NULL, &sc->buf_tag);
if (error != 0) {
device_printf(sc->dev,
@@ -374,21 +431,22 @@ ixpcrypto_dma_setup(struct ixpcrypto_sof
error);
return error;
}
- sc->buf = malloc(ixpcrypto_cmdbuf * sizeof(struct npebuf),
- M_IXPCRYPTO, M_NOWAIT | M_ZERO);
- if (sc->buf == NULL) {
+ if (bus_dmamap_load(sc->buf_tag, sc->buf_map,
+ sc->hwbuf, 2*ixpcrypto_qlen*sizeof(struct npehwbuf), npe_getaddr, sc, 0) != 0) {
device_printf(sc->dev,
- "unable to allocate memory for s/w buffers\n");
+ "unable to map memory for h/w buffers, error %u\n", error);
return error;
}
- if (bus_dmamap_load(sc->buf_tag, sc->buf_map,
- sc->hwbuf, ixpcrypto_cmdbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0) != 0) {
+
+ sc->buf = malloc(2*ixpcrypto_qlen * sizeof(struct npebuf),
+ M_IXPCRYPTO, M_NOWAIT | M_ZERO);
+ if (sc->buf == NULL) {
device_printf(sc->dev,
- "unable to map memory for h/w buffers, error %u\n", error);
+ "unable to allocate memory for s/w buffers\n");
return error;
}
/* NB: sc->buf_phys set by npe_getaddr */
- for (i = 0; i < ixpcrypto_cmdbuf; i++) {
+ for (i = 0; i < 2*ixpcrypto_qlen; i++) {
struct npebuf *npe = &sc->buf[i];
struct npehwbuf *hw = &sc->hwbuf[i];
@@ -405,9 +463,7 @@ ixpcrypto_dma_setup(struct ixpcrypto_sof
return error;
}
npe->hw = hw;
-
- npe->next = sc->free;
- sc->free = npe;
+ npe->sc = sc;
}
bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREWRITE);
return 0;
@@ -419,7 +475,7 @@ ixpcrypto_dma_destroy(struct ixpcrypto_s
int i;
if (sc->hwbuf != NULL) {
- for (i = 0; i < ixpcrypto_cmdbuf; i++) {
+ for (i = 0; i < 2*ixpcrypto_qlen; i++) {
struct npebuf *npe = &sc->buf[i];
bus_dmamap_destroy(sc->dtag, npe->map);
}
@@ -432,6 +488,12 @@ ixpcrypto_dma_destroy(struct ixpcrypto_s
bus_dma_tag_destroy(sc->buf_tag);
if (sc->dtag)
bus_dma_tag_destroy(sc->dtag);
+ if (sc->hwctx != NULL) {
+ bus_dmamap_unload(sc->ctx_tag, sc->ctx_map);
+ bus_dmamem_free(sc->ctx_tag, sc->hwctx, sc->ctx_map);
+ }
+ if (sc->ctx_tag)
+ bus_dma_tag_destroy(sc->ctx_tag);
mtx_destroy(&sc->mtx);
}
@@ -532,17 +594,43 @@ ixpcrypto_freesession(device_t dev, uint
return EINVAL;
}
+static __inline struct npehwctx *
+ctx_alloc(struct ixpcrypto_softc *sc)
+{
+ struct npehwctx *ctx;
+
+ mtx_lock(&sc->mtx);
+ ctx = sc->ctx;
+ if (ctx != NULL)
+ sc->ctx = ctx->next;
+ mtx_unlock(&sc->mtx);
+ return ctx;
+}
+
+static __inline void
+ctx_free(struct ixpcrypto_softc *sc, struct npehwctx *ctx)
+{
+
+ mtx_lock(&sc->mtx);
+ ctx->next = sc->ctx;
+ sc->ctx = ctx;
+ mtx_unlock(&sc->mtx);
+}
+
static void
ixpcrypto_cb(void *arg,
bus_dma_segment_t *segs, int nsegs, bus_size_t len, int error)
{
struct npebuf *npe = arg;
+ struct ixpcrypto_softc *sc = npe->sc;
+ struct npehwctx *ctx;
struct npehwbuf *hw;
uint32_t next;
- int i;
+ int i, ix;
if (error != 0)
return;
+
hw = npe->hw;
next = npe->neaddr + sizeof(hw->ne[0]);
for (i = 0; i < nsegs; i++) {
@@ -554,6 +642,16 @@ ixpcrypto_cb(void *arg,
next += sizeof(hw->ne[0]);
}
hw->ne[i-1].next = 0; /* zero last in chain */
+
+ bus_dmamap_sync(sc->dtag, npe->map, BUS_DMASYNC_PREWRITE);
+ /* XXX flush descriptor instead of using uncached memory */
+
+ ix = npe - sc->buf;
+ ctx = &sc->hwctx[ix];
+ ctx->src = htobe32(npe->neaddr);
+
+ /* dispatch to NPE */
+ ixpqmgr_qwrite(sc->cmd_qid, sc->ctx_phys + (ix * sizeof(*ctx)));
}
static int
@@ -562,20 +660,21 @@ ixpcrypto_process(device_t dev, struct c
struct ixpcrypto_softc *sc = device_get_softc(dev);
struct ixpcrypto_session *ses = NULL;
struct cryptodesc *crd, *enccrd, *maccrd;
+ struct npehwctx *npectx;
struct npebuf *npe;
+ uint32_t cfgword;
int error = 0;
- enccrd = maccrd = NULL;
-
- /* Sanity check. */
- if (crp == NULL)
- return EINVAL;
+ KASSERT(crp != NULL, ("null crp"));
+ KASSERT(crp->crp_callback != NULL, ("null callback"));
+ KASSERT(crp->crp_desc != NULL, ("null desc"));
- if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
+ if ((crp->crp_flags & (CRYPTO_F_IMBUF | CRYPTO_F_IOV)) == 0) {
error = EINVAL;
- goto out;
+ goto done;
}
+ enccrd = maccrd = NULL;
for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
switch (crd->crd_alg) {
case CRYPTO_NULL_HMAC:
@@ -592,7 +691,12 @@ ixpcrypto_process(device_t dev, struct c
}
maccrd = crd;
break;
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
case CRYPTO_AES_CBC:
+#ifdef CRYPTO_AES_CTR_CBC
+ case CRYPTO_AES_CTR_CBC:
+#endif
if (enccrd != NULL) {
error = EINVAL;
goto out;
@@ -603,10 +707,6 @@ ixpcrypto_process(device_t dev, struct c
return EINVAL;
}
}
- if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
- error = EINVAL;
- goto out;
- }
rw_rlock(&sc->sessions_lock);
TAILQ_FOREACH_REVERSE(ses, &sc->sessions, ixpcrypto_sessions_head, next) {
@@ -616,49 +716,72 @@ ixpcrypto_process(device_t dev, struct c
rw_runlock(&sc->sessions_lock);
if (ses == NULL) {
error = EINVAL;
- goto out;
+ goto done;
}
- mtx_lock(&sc->mtx);
- npe = sc->free;
- if (npe != NULL)
- sc->free = npe->next;
- mtx_unlock(&sc->mtx);
- if (npe == NULL) {
+ /* allocate an npe context for the request */
+ npectx = ctx_alloc(sc);
+ if (npectx == NULL) {
error = ENOBUFS;
- goto out;
+ goto bad;
}
-
- npe->crp = crp;
+ npectx->crp = crp;
+ cfgword = 0;
+ if (maccrd != NULL) {
+ }
+ if (enccrd != NULL) {
+ switch (enccrd->crd_alg) {
+ case CRYPTO_DES_CBC:
+ cfgword |= ;
+ break;
+ case CRYPTO_3DES_CBC:
+ cfgword |= ;
+ break;
+ case CRYPTO_AES_CBC:
+ cfgword |= ;
+ switch (enccrd->crd_klen) {
+ case 128:
+ case 192:
+ case 256:
+ }
+ break;
+#ifdef CRYPTO_AES_CTR_CBC
+ case CRYPTO_AES_CTR_CBC:
+ cfgword |= ;
+ break;
+#endif
+ }
+ if (enccrd->crd_flags & CRD_F_ENCRYPT)
+ cfgword |= ;
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(npectx->iv, crd_iv, 16);
+ else
+ memcpy(npectx->iv, ses->iv, 16);
+ if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
+ else
+ }
+ /*
+ * Assign operand buffers; NB: we pre-assign src+dst
+ * so there's no need to do it under the mtx.
+ * XXX assign and setup dst buffer
+ */
+ npe = &sc->buf[npectx - sc->hwctx];
if (crp->crp_flags & CRYPTO_F_IMBUF) {
error = bus_dmamap_load_mbuf(sc->dtag, npe->map,
- (struct mbuf *) crp->crp_buf,
- ixpcrypto_cb, npe, BUS_DMA_NOWAIT);
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ (struct mbuf *) crp->crp_buf, ixpcrypto_cb, npe, 0);
+ if (error)
+ goto bad;
+ } else {
error = bus_dmamap_load_uio(sc->dtag, npe->map,
- (struct uio *) crp->crp_buf,
- ixpcrypto_cb, npe, BUS_DMA_NOWAIT);
- } else
- error = EINVAL;
- if (error != 0) {
- device_printf(sc->dev, "%s: error %u\n", __func__, error);
- mtx_lock(&sc->mtx);
- npe->next = sc->free;
- sc->free = npe;
- mtx_unlock(&sc->mtx);
- goto out;
+ (struct uio *) crp->crp_buf, ixpcrypto_cb, npe, 0);
+ if (error)
+ goto bad;
}
-
- bus_dmamap_sync(sc->dtag, npe->map, BUS_DMASYNC_PREWRITE);
- /* XXX flush descriptor instead of using uncached memory */
-
- DPRINTF(sc, "%s: qwrite(%u, 0x%x) data %x len 0x%x\n",
- __func__, sc->cmd_qid, npe->neaddr,
- npe->hw->ne[0].data, npe->hw->ne[0].len);
- /* stick it on the cmd q */
- ixpqmgr_qwrite(sc->cmd_qid, npe->neaddr);
- return 0;
-out:
+ return 0; /* NB: success, work completes in cb */
+bad:
+ ctx_free(sc, npectx);
+ /* fall thru... */
+done:
crp->crp_etype = error;
crypto_done(crp);
return error;
@@ -667,34 +790,27 @@ out:
static void
ixpcrypto_cmddone(int qid, void *arg)
{
-#define P2V(a, sc) \
- &(sc)->buf[((a) - (sc)->buf_phys) / sizeof(struct npehwbuf)]
+#define P2CTX(a, sc) \
+ &(sc)->hwctx[((a) - (sc)->ctx_phys) / sizeof(struct npehwctx)]
+#define P2BUF(a, sc) \
+ &(sc)->buf[((a) - (sc)->buf_phys) / sizeof(struct npehwbuf)]
struct ixpcrypto_softc *sc = arg;
uint32_t entry;
- struct npebuf *head;
- struct npebuf **tail;
- struct npebuf *npe;
- head = NULL;
- tail = &head;
while (ixpqmgr_qread(qid, &entry) == 0) {
- npe = P2V(NPE_QM_Q_ADDR(entry), sc);
-
- /* XXX optimize based on request */
- bus_dmamap_sync(sc->dtag, npe->map,
- BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
+ struct npehwctx *ctx = P2CTX(NPE_QM_Q_ADDR(entry), sc);
- /* XXX copyback iv */
- crypto_done(npe->crp);
-
- *tail = npe;
- tail = &npe->next;
+ if (ctx->op == data coming back) {
+ struct npebuf *dst = P2BUF(ctx->dst, sc);
+ bus_dmamap_sync(sc->dtag, dst->map,
+ BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
+ }
+ crypto_done(ctx->crp);
+ /* XXX optimize? */
+ ctx_free(sc, ctx);
}
- mtx_lock(&sc->mtx);
- *tail = sc->free;
- sc->free = head;
- mtx_unlock(&sc->mtx);
-#undef P2V
+#undef P2BUF
+#undef P2CTX
}
static device_method_t ixpcrypto_methods[] = {
More information about the svn-src-projects
mailing list