git: 789f2d4b3f33 - main - cxgbe tom: Remove support for non-KTLS TLS offload.

John Baldwin jhb at FreeBSD.org
Wed Jun 16 00:46:00 UTC 2021


The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=789f2d4b3f33d4414eaf0b4e7daef41e89d1b224

commit 789f2d4b3f33d4414eaf0b4e7daef41e89d1b224
Author:     John Baldwin <jhb at FreeBSD.org>
AuthorDate: 2021-05-28 23:49:56 +0000
Commit:     John Baldwin <jhb at FreeBSD.org>
CommitDate: 2021-06-16 00:45:26 +0000

    cxgbe tom: Remove support for non-KTLS TLS offload.
    
    TOE TLS offload was first supported via a customized OpenSSL developed
    by Chelsio with proprietary socket options prior to KTLS being present
    either in FreeBSD or upstream OpenSSL.  With the addition of KTLS in
    both places, cxgbe's TOE driver was extended to support TLS offload
    via KTLS as well.  This change removes the older interface, leaving
    only the KTLS bindings for TOE TLS.
    
    Since KTLS was added to TOE TLS second, it was somewhat shoe-horned
    into the existing code.  In addition to removing the non-KTLS TLS
    offload, refactor and simplify the code to assume KTLS, e.g. instead
    of copying keys into a helper structure that mimicked the non-KTLS
    mode, the KTLS session object is used directly when constructing key
    contexts.
    
    This also removes some unused code to send TX keys inline in work
    requests for TOE TLS.  This code was never enabled, and was arguably
    sending the wrong thing (it was not sending the raw key context as we
    do for NIC TLS when using inline keys).
    
    Sponsored by:   Chelsio Communications
---
 sys/dev/cxgbe/tom/t4_cpl_io.c |   15 -
 sys/dev/cxgbe/tom/t4_tls.c    | 1455 ++++++++---------------------------------
 sys/dev/cxgbe/tom/t4_tls.h    |  178 +----
 sys/dev/cxgbe/tom/t4_tom.c    |   20 -
 sys/dev/cxgbe/tom/t4_tom.h    |    4 -
 5 files changed, 294 insertions(+), 1378 deletions(-)
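[Editor's note: the heart of the refactor is visible in the new helpers in
t4_tls.c below (tls_cipher_mode(), tls_auth_mode(), tls_key_info_size(), ...),
which translate fields of the KTLS session directly into hardware SCMD_*
values instead of first copying them into the old tls_key_context structure.
The following is a minimal standalone sketch of that mapping pattern; the
enum values and the ktls_session_params struct are simplified stand-ins for
the FreeBSD/Chelsio headers, not the real definitions.]

/*
 * Sketch: derive hardware cipher/auth modes straight from KTLS session
 * parameters, mirroring tls_cipher_mode()/tls_auth_mode() in t4_tls.c.
 * All identifiers here are illustrative stand-ins.
 */
#include <stdio.h>

/* Stand-ins for the opencrypto algorithm identifiers. */
enum {
	CRYPTO_AES_CBC = 1,
	CRYPTO_AES_NIST_GCM_16,
	CRYPTO_SHA1_HMAC,
	CRYPTO_SHA2_256_HMAC,
	CRYPTO_SHA2_384_HMAC,
};

/* Stand-ins for the hardware SCMD_* mode values. */
enum {
	SCMD_CIPH_MODE_NOP = 0,
	SCMD_CIPH_MODE_AES_CBC,
	SCMD_CIPH_MODE_AES_GCM,
	SCMD_AUTH_MODE_NOP = 0,
	SCMD_AUTH_MODE_SHA1,
	SCMD_AUTH_MODE_SHA256,
	SCMD_AUTH_MODE_SHA512_384,
	SCMD_AUTH_MODE_GHASH,
};

/* Trimmed-down stand-in for the relevant struct ktls_session fields. */
struct ktls_session_params {
	int cipher_algorithm;
	int auth_algorithm;
};

/* Cipher mode is read directly from the session's cipher algorithm. */
static int
cipher_mode(const struct ktls_session_params *p)
{
	switch (p->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		return (SCMD_CIPH_MODE_AES_CBC);
	case CRYPTO_AES_NIST_GCM_16:
		return (SCMD_CIPH_MODE_AES_GCM);
	default:
		return (SCMD_CIPH_MODE_NOP);
	}
}

/* Auth mode depends on the cipher (GCM implies GHASH) and HMAC choice. */
static int
auth_mode(const struct ktls_session_params *p)
{
	switch (p->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		switch (p->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			return (SCMD_AUTH_MODE_SHA1);
		case CRYPTO_SHA2_256_HMAC:
			return (SCMD_AUTH_MODE_SHA256);
		case CRYPTO_SHA2_384_HMAC:
			return (SCMD_AUTH_MODE_SHA512_384);
		default:
			return (SCMD_AUTH_MODE_NOP);
		}
	case CRYPTO_AES_NIST_GCM_16:
		return (SCMD_AUTH_MODE_GHASH);
	default:
		return (SCMD_AUTH_MODE_NOP);
	}
}

int
main(void)
{
	struct ktls_session_params p = {
		.cipher_algorithm = CRYPTO_AES_CBC,
		.auth_algorithm = CRYPTO_SHA2_256_HMAC,
	};

	printf("cipher mode %d, auth mode %d\n",
	    cipher_mode(&p), auth_mode(&p));
	return (0);
}

[End editor's note; the actual commit diff follows.]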

diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 732754d07f8f..a1bc88bdea7f 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -1170,12 +1170,8 @@ t4_push_data(struct adapter *sc, struct toepcb *toep, int drop)
 
 	if (ulp_mode(toep) == ULP_MODE_ISCSI)
 		t4_push_pdus(sc, toep, drop);
-	else if (tls_tx_key(toep) && toep->tls.mode == TLS_MODE_TLSOM)
-		t4_push_tls_records(sc, toep, drop);
-#ifdef KERN_TLS
 	else if (toep->flags & TPF_KTLS)
 		t4_push_ktls(sc, toep, drop);
-#endif
 	else
 		t4_push_frames(sc, toep, drop);
 }
@@ -1809,10 +1805,6 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
 		credits -= txsd->tx_credits;
 		toep->tx_credits += txsd->tx_credits;
 		plen += txsd->plen;
-		if (txsd->iv_buffer) {
-			free(txsd->iv_buffer, M_CXGBE);
-			txsd->iv_buffer = NULL;
-		}
 		txsd++;
 		toep->txsd_avail++;
 		KASSERT(toep->txsd_avail <= toep->txsd_total,
@@ -1863,13 +1855,6 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
 			    tid, plen);
 #endif
 			sbdrop_locked(sb, plen);
-			if (tls_tx_key(toep) &&
-			    toep->tls.mode == TLS_MODE_TLSOM) {
-				struct tls_ofld_info *tls_ofld = &toep->tls;
-
-				MPASS(tls_ofld->sb_off >= plen);
-				tls_ofld->sb_off -= plen;
-			}
 			if (!TAILQ_EMPTY(&toep->aiotx_jobq))
 				t4_aiotx_queue_toep(so, toep);
 			sowwakeup_locked(so);	/* unlocks so_snd */
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index bbd905d8acc3..be47dbac7ae5 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -33,11 +33,10 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
+#ifdef KERN_TLS
 #include <sys/param.h>
 #include <sys/ktr.h>
-#ifdef KERN_TLS
 #include <sys/ktls.h>
-#endif
 #include <sys/sglist.h>
 #include <sys/socket.h>
 #include <sys/socketvar.h>
@@ -46,10 +45,8 @@ __FBSDID("$FreeBSD$");
 #include <netinet/in_pcb.h>
 #include <netinet/tcp_var.h>
 #include <netinet/toecore.h>
-#ifdef KERN_TLS
 #include <opencrypto/cryptodev.h>
 #include <opencrypto/xform.h>
-#endif
 
 #ifdef TCP_OFFLOAD
 #include "common/common.h"
@@ -89,23 +86,6 @@ tls_tx_key(struct toepcb *toep)
 	return (tls_ofld->tx_key_addr >= 0);
 }
 
-int
-tls_rx_key(struct toepcb *toep)
-{
-	struct tls_ofld_info *tls_ofld = &toep->tls;
-
-	return (tls_ofld->rx_key_addr >= 0);
-}
-
-static int
-key_size(struct toepcb *toep)
-{
-	struct tls_ofld_info *tls_ofld = &toep->tls;
-
-	return ((tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) ?
-		tls_ofld->k_ctx.tx_key_info_size : KEY_IN_DDR_SIZE);
-}
-
 /* Set TLS Key-Id in TCB */
 static void
 t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
@@ -145,125 +125,6 @@ tls_clr_ofld_mode(struct toepcb *toep)
 	toep->params.ulp_mode = ULP_MODE_NONE;
 }
 
-static void
-tls_clr_quiesce(struct toepcb *toep)
-{
-
-	tls_stop_handshake_timer(toep);
-	t4_clear_rx_quiesce(toep);
-}
-
-/*
- * Calculate the TLS data expansion size
- */
-static int
-tls_expansion_size(struct toepcb *toep, int data_len, int full_pdus_only,
-    unsigned short *pdus_per_ulp)
-{
-	struct tls_ofld_info *tls_ofld = &toep->tls;
-	struct tls_scmd *scmd = &tls_ofld->scmd0;
-	int expn_size = 0, frag_count = 0, pad_per_pdu = 0,
-	    pad_last_pdu = 0, last_frag_size = 0, max_frag_size = 0;
-	int exp_per_pdu = 0;
-	int hdr_len = TLS_HEADER_LENGTH;
-
-	do {
-		max_frag_size = tls_ofld->k_ctx.frag_size;
-		if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) ==
-		   SCMD_CIPH_MODE_AES_GCM) {
-			frag_count = (data_len / max_frag_size);
-			exp_per_pdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
-				hdr_len;
-			expn_size =  frag_count * exp_per_pdu;
-			if (full_pdus_only) {
-				*pdus_per_ulp = data_len / (exp_per_pdu +
-					max_frag_size);
-				if (*pdus_per_ulp > 32)
-					*pdus_per_ulp = 32;
-				else if(!*pdus_per_ulp)
-					*pdus_per_ulp = 1;
-				expn_size = (*pdus_per_ulp) * exp_per_pdu;
-				break;
-			}
-			if ((last_frag_size = data_len % max_frag_size) > 0) {
-				frag_count += 1;
-				expn_size += exp_per_pdu;
-			}
-			break;
-		} else if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) !=
-			   SCMD_CIPH_MODE_NOP) {
-			/* Calculate the number of fragments we can make */
-			frag_count  = (data_len / max_frag_size);
-			if (frag_count > 0) {
-				pad_per_pdu = (((howmany((max_frag_size +
-						       tls_ofld->mac_length),
-						      CIPHER_BLOCK_SIZE)) *
-						CIPHER_BLOCK_SIZE) -
-					       (max_frag_size +
-						tls_ofld->mac_length));
-				if (!pad_per_pdu)
-					pad_per_pdu = CIPHER_BLOCK_SIZE;
-				exp_per_pdu = pad_per_pdu +
-				       	tls_ofld->mac_length +
-					hdr_len + CIPHER_BLOCK_SIZE;
-				expn_size = frag_count * exp_per_pdu;
-			}
-			if (full_pdus_only) {
-				*pdus_per_ulp = data_len / (exp_per_pdu +
-					max_frag_size);
-				if (*pdus_per_ulp > 32)
-					*pdus_per_ulp = 32;
-				else if (!*pdus_per_ulp)
-					*pdus_per_ulp = 1;
-				expn_size = (*pdus_per_ulp) * exp_per_pdu;
-				break;
-			}
-			/* Consider the last fragment */
-			if ((last_frag_size = data_len % max_frag_size) > 0) {
-				pad_last_pdu = (((howmany((last_frag_size +
-							tls_ofld->mac_length),
-						       CIPHER_BLOCK_SIZE)) *
-						 CIPHER_BLOCK_SIZE) -
-						(last_frag_size +
-						 tls_ofld->mac_length));
-				if (!pad_last_pdu)
-					pad_last_pdu = CIPHER_BLOCK_SIZE;
-				expn_size += (pad_last_pdu +
-					      tls_ofld->mac_length + hdr_len +
-					      CIPHER_BLOCK_SIZE);
-			}
-		}
-	} while (0);
-
-	return (expn_size);
-}
-
-/* Copy Key to WR */
-static void
-tls_copy_tx_key(struct toepcb *toep, void *dst)
-{
-	struct tls_ofld_info *tls_ofld = &toep->tls;
-	struct ulptx_sc_memrd *sc_memrd;
-	struct ulptx_idata *sc;
-
-	if (tls_ofld->k_ctx.tx_key_info_size <= 0)
-		return;
-
-	if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR) {
-		sc = dst;
-		sc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
-		sc->len = htobe32(0);
-		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
-		sc_memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
-		    V_ULP_TX_SC_MORE(1) |
-		    V_ULPTX_LEN16(tls_ofld->k_ctx.tx_key_info_size >> 4));
-		sc_memrd->addr = htobe32(tls_ofld->tx_key_addr >> 5);
-	} else if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) {
-		memcpy(dst, &tls_ofld->k_ctx.tx,
-		    tls_ofld->k_ctx.tx_key_info_size);
-	}
-}
-
 /* TLS/DTLS content type  for CPL SFO */
 static inline unsigned char
 tls_content_type(unsigned char content_type)
@@ -286,64 +147,194 @@ tls_content_type(unsigned char content_type)
 	return CPL_TX_TLS_SFO_TYPE_DATA;
 }
 
-static unsigned char
-get_cipher_key_size(unsigned int ck_size)
+static int
+tls_key_info_size(struct ktls_session *tls)
+{
+	u_int key_info_size, mac_key_size;
+
+	key_info_size = sizeof(struct tx_keyctx_hdr) +
+	    tls->params.cipher_key_len;
+	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
+		key_info_size += GMAC_BLOCK_LEN;
+	} else {
+		switch (tls->params.auth_algorithm) {
+		case CRYPTO_SHA1_HMAC:
+			mac_key_size = SHA1_HASH_LEN;
+			break;
+		case CRYPTO_SHA2_256_HMAC:
+			mac_key_size = SHA2_256_HASH_LEN;
+			break;
+		case CRYPTO_SHA2_384_HMAC:
+			mac_key_size = SHA2_512_HASH_LEN;
+			break;
+		default:
+			__assert_unreachable();
+		}
+		key_info_size += roundup2(mac_key_size, 16) * 2;
+	}
+	return (key_info_size);
+}
+
+static int
+tls_proto_ver(struct ktls_session *tls)
+{
+	if (tls->params.tls_vminor == TLS_MINOR_VER_ONE)
+		return (SCMD_PROTO_VERSION_TLS_1_1);
+	else
+		return (SCMD_PROTO_VERSION_TLS_1_2);
+}
+
+static int
+tls_cipher_mode(struct ktls_session *tls)
 {
-	switch (ck_size) {
-	case AES_NOP: /* NOP */
-		return 15;
-	case AES_128: /* AES128 */
-		return CH_CK_SIZE_128;
-	case AES_192: /* AES192 */
-		return CH_CK_SIZE_192;
-	case AES_256: /* AES256 */
-		return CH_CK_SIZE_256;
+	switch (tls->params.cipher_algorithm) {
+	case CRYPTO_AES_CBC:
+		return (SCMD_CIPH_MODE_AES_CBC);
+	case CRYPTO_AES_NIST_GCM_16:
+		return (SCMD_CIPH_MODE_AES_GCM);
 	default:
-		return CH_CK_SIZE_256;
+		return (SCMD_CIPH_MODE_NOP);
 	}
 }
 
-static unsigned char
-get_mac_key_size(unsigned int mk_size)
+static int
+tls_auth_mode(struct ktls_session *tls)
 {
-	switch (mk_size) {
-	case SHA_NOP: /* NOP */
-		return CH_MK_SIZE_128;
-	case SHA_GHASH: /* GHASH */
-	case SHA_512: /* SHA512 */
-		return CH_MK_SIZE_512;
-	case SHA_224: /* SHA2-224 */
-		return CH_MK_SIZE_192;
-	case SHA_256: /* SHA2-256*/
-		return CH_MK_SIZE_256;
-	case SHA_384: /* SHA384 */
-		return CH_MK_SIZE_512;
-	case SHA1: /* SHA1 */
+	switch (tls->params.cipher_algorithm) {
+	case CRYPTO_AES_CBC:
+		switch (tls->params.auth_algorithm) {
+		case CRYPTO_SHA1_HMAC:
+			return (SCMD_AUTH_MODE_SHA1);
+		case CRYPTO_SHA2_256_HMAC:
+			return (SCMD_AUTH_MODE_SHA256);
+		case CRYPTO_SHA2_384_HMAC:
+			return (SCMD_AUTH_MODE_SHA512_384);
+		default:
+			return (SCMD_AUTH_MODE_NOP);
+		}
+	case CRYPTO_AES_NIST_GCM_16:
+		return (SCMD_AUTH_MODE_GHASH);
+	default:
+		return (SCMD_AUTH_MODE_NOP);
+	}
+}
+
+static int
+tls_hmac_ctrl(struct ktls_session *tls)
+{
+	switch (tls->params.cipher_algorithm) {
+	case CRYPTO_AES_CBC:
+		return (SCMD_HMAC_CTRL_NO_TRUNC);
+	case CRYPTO_AES_NIST_GCM_16:
+		return (SCMD_HMAC_CTRL_NOP);
 	default:
-		return CH_MK_SIZE_160;
+		return (SCMD_HMAC_CTRL_NOP);
 	}
 }
 
-static unsigned int
-get_proto_ver(int proto_ver)
+static int
+tls_cipher_key_size(struct ktls_session *tls)
 {
-	switch (proto_ver) {
-	case TLS1_2_VERSION:
-		return TLS_1_2_VERSION;
-	case TLS1_1_VERSION:
-		return TLS_1_1_VERSION;
-	case DTLS1_2_VERSION:
-		return DTLS_1_2_VERSION;
+	switch (tls->params.cipher_key_len) {
+	case 128 / 8:
+		return (CHCR_KEYCTX_CIPHER_KEY_SIZE_128);
+	case 192 / 8:
+		return (CHCR_KEYCTX_CIPHER_KEY_SIZE_192);
+	case 256 / 8:
+		return (CHCR_KEYCTX_CIPHER_KEY_SIZE_256);
 	default:
-		return TLS_VERSION_MAX;
+		__assert_unreachable();
+	}
+}
+
+static int
+tls_mac_key_size(struct ktls_session *tls)
+{
+	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
+		/*
+		 * XXX: This used to use 128 (SHA_NOP) for TOE,
+		 * but NIC TLS has always used 512.
+		 */
+		return (CHCR_KEYCTX_MAC_KEY_SIZE_512);
+	else {
+		switch (tls->params.auth_algorithm) {
+		case CRYPTO_SHA1_HMAC:
+			return (CHCR_KEYCTX_MAC_KEY_SIZE_160);
+		case CRYPTO_SHA2_256_HMAC:
+			return (CHCR_KEYCTX_MAC_KEY_SIZE_256);
+		case CRYPTO_SHA2_384_HMAC:
+			return (CHCR_KEYCTX_MAC_KEY_SIZE_512);
+		default:
+			__assert_unreachable();
+		}
+	}
+}
+
+static void
+prepare_tls_keys(char *key, char *salt, struct ktls_session *tls,
+    int direction)
+{
+	struct auth_hash *axf;
+	u_int mac_key_size;
+	char *hash;
+
+	if (direction == KTLS_RX &&
+	    tls->params.cipher_algorithm == CRYPTO_AES_CBC)
+		t4_aes_getdeckey(key, tls->params.cipher_key,
+		    tls->params.cipher_key_len * 8);
+	else
+		memcpy(key, tls->params.cipher_key,
+		    tls->params.cipher_key_len);
+	hash = key + tls->params.cipher_key_len;
+	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
+		memcpy(salt, tls->params.iv, SALT_SIZE);
+		t4_init_gmac_hash(tls->params.cipher_key,
+		    tls->params.cipher_key_len, hash);
+	} else {
+		switch (tls->params.auth_algorithm) {
+		case CRYPTO_SHA1_HMAC:
+			axf = &auth_hash_hmac_sha1;
+			mac_key_size = SHA1_HASH_LEN;
+			break;
+		case CRYPTO_SHA2_256_HMAC:
+			axf = &auth_hash_hmac_sha2_256;
+			mac_key_size = SHA2_256_HASH_LEN;
+			break;
+		case CRYPTO_SHA2_384_HMAC:
+			axf = &auth_hash_hmac_sha2_384;
+			mac_key_size = SHA2_512_HASH_LEN;
+			break;
+		default:
+			__assert_unreachable();
+		}
+		t4_init_hmac_digest(axf, mac_key_size, tls->params.auth_key,
+		    tls->params.auth_key_len, hash);
 	}
 }
 
+/* Rx key */
 static void
-tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
+prepare_rxkey_wr(struct tls_keyctx *kwr, struct ktls_session *tls)
 {
 
-	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
+	kwr->u.rxhdr.flitcnt_hmacctrl =
+		((tls_key_info_size(tls) / 16) << 3) | tls_hmac_ctrl(tls);
+
+	kwr->u.rxhdr.protover_ciphmode =
+		V_TLS_KEYCTX_TX_WR_PROTOVER(tls_proto_ver(tls)) |
+		V_TLS_KEYCTX_TX_WR_CIPHMODE(tls_cipher_mode(tls));
+
+	kwr->u.rxhdr.authmode_to_rxvalid =
+	        V_TLS_KEYCTX_TX_WR_AUTHMODE(tls_auth_mode(tls)) |
+		V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
+		V_TLS_KEYCTX_TX_WR_RXVALID(1);
+
+	kwr->u.rxhdr.ivpresent_to_rxmk_size =
+		V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
+		V_TLS_KEYCTX_TX_WR_RXCK_SIZE(tls_cipher_key_size(tls)) |
+		V_TLS_KEYCTX_TX_WR_RXMK_SIZE(tls_mac_key_size(tls));
+
+	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
 		kwr->u.rxhdr.ivinsert_to_authinsrt =
 		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
 			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
@@ -353,11 +344,11 @@ tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
 			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
 			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
 			V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
-		kwr->u.rxhdr.ivpresent_to_rxmk_size &=
-			~(V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1));
-		kwr->u.rxhdr.authmode_to_rxvalid &=
-			~(V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1));
 	} else {
+		kwr->u.rxhdr.authmode_to_rxvalid |=
+			V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1);
+		kwr->u.rxhdr.ivpresent_to_rxmk_size |=
+			V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1);
 		kwr->u.rxhdr.ivinsert_to_authinsrt =
 		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
 			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
@@ -368,74 +359,27 @@ tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
 			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
 			V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
 	}
-}
-
-/* Rx key */
-static void
-prepare_rxkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
-{
-	unsigned int ck_size = kctx->cipher_secret_size;
-	unsigned int mk_size = kctx->mac_secret_size;
-	int proto_ver = kctx->proto_ver;
-
-	kwr->u.rxhdr.flitcnt_hmacctrl =
-		((kctx->rx_key_info_size >> 4) << 3) | kctx->hmac_ctrl;
 
-	kwr->u.rxhdr.protover_ciphmode =
-		V_TLS_KEYCTX_TX_WR_PROTOVER(get_proto_ver(proto_ver)) |
-		V_TLS_KEYCTX_TX_WR_CIPHMODE(kctx->state.enc_mode);
-
-	kwr->u.rxhdr.authmode_to_rxvalid =
-		V_TLS_KEYCTX_TX_WR_AUTHMODE(kctx->state.auth_mode) |
-		V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1) |
-		V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
-		V_TLS_KEYCTX_TX_WR_RXVALID(1);
-
-	kwr->u.rxhdr.ivpresent_to_rxmk_size =
-		V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
-		V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1) |
-		V_TLS_KEYCTX_TX_WR_RXCK_SIZE(get_cipher_key_size(ck_size)) |
-		V_TLS_KEYCTX_TX_WR_RXMK_SIZE(get_mac_key_size(mk_size));
-
-	tls_rxkey_flit1(kwr, kctx);
-
-	/* No key reversal for GCM */
-	if (kctx->state.enc_mode != CH_EVP_CIPH_GCM_MODE) {
-		t4_aes_getdeckey(kwr->keys.edkey, kctx->rx.key,
-				 (kctx->cipher_secret_size << 3));
-		memcpy(kwr->keys.edkey + kctx->cipher_secret_size,
-		       kctx->rx.key + kctx->cipher_secret_size,
-		       (IPAD_SIZE + OPAD_SIZE));
-	} else {
-		memcpy(kwr->keys.edkey, kctx->rx.key,
-		       (kctx->rx_key_info_size - SALT_SIZE));
-		memcpy(kwr->u.rxhdr.rxsalt, kctx->rx.salt, SALT_SIZE);
-	}
+	prepare_tls_keys(kwr->keys.edkey, kwr->u.rxhdr.rxsalt, tls, KTLS_RX);
 }
 
 /* Tx key */
 static void
-prepare_txkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
+prepare_txkey_wr(struct tls_keyctx *kwr, struct ktls_session *tls)
 {
-	unsigned int ck_size = kctx->cipher_secret_size;
-	unsigned int mk_size = kctx->mac_secret_size;
 
-	kwr->u.txhdr.ctxlen =
-		(kctx->tx_key_info_size >> 4);
+	kwr->u.txhdr.ctxlen = tls_key_info_size(tls) / 16;
 	kwr->u.txhdr.dualck_to_txvalid =
-		V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1) |
 		V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
-		V_TLS_KEYCTX_TX_WR_TXCK_SIZE(get_cipher_key_size(ck_size)) |
-		V_TLS_KEYCTX_TX_WR_TXMK_SIZE(get_mac_key_size(mk_size)) |
+		V_TLS_KEYCTX_TX_WR_TXCK_SIZE(tls_cipher_key_size(tls)) |
+		V_TLS_KEYCTX_TX_WR_TXMK_SIZE(tls_mac_key_size(tls)) |
 		V_TLS_KEYCTX_TX_WR_TXVALID(1);
-
-	memcpy(kwr->keys.edkey, kctx->tx.key, HDR_KCTX_SIZE);
-	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
-		memcpy(kwr->u.txhdr.txsalt, kctx->tx.salt, SALT_SIZE);
-		kwr->u.txhdr.dualck_to_txvalid &=
-			~(V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1));
-	}
+	if (tls->params.cipher_algorithm == CRYPTO_AES_CBC)
+		kwr->u.txhdr.dualck_to_txvalid |=
+		    V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1);
 	kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
+
+	prepare_tls_keys(kwr->keys.edkey, kwr->u.txhdr.txsalt, tls, KTLS_TX);
 }
 
 /* TLS Key memory management */
@@ -476,23 +420,17 @@ clear_tls_keyid(struct toepcb *toep)
 }
 
 static int
-get_keyid(struct tls_ofld_info *tls_ofld, unsigned int ops)
-{
-	return (ops & KEY_WRITE_RX ? tls_ofld->rx_key_addr :
-		((ops & KEY_WRITE_TX) ? tls_ofld->tx_key_addr : -1));
-}
-
-static int
-get_tp_plen_max(struct tls_ofld_info *tls_ofld)
+get_tp_plen_max(struct ktls_session *tls)
 {
 	int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;
 
-	return (tls_ofld->k_ctx.frag_size <= 8192 ? plen : FC_TP_PLEN_MAX);
+	return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX);
 }
 
 /* Send request to get the key-id */
 static int
-tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
+tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
+    int direction)
 {
 	struct tls_ofld_info *tls_ofld = &toep->tls;
 	struct adapter *sc = td_adapter(toep->td);
@@ -509,13 +447,8 @@ tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
 	if (toep->txsd_avail == 0)
 		return (EAGAIN);
 
-	/* Dont initialize key for re-neg */
-	if (!G_KEY_CLR_LOC(k_ctx->l_p_key)) {
-		if ((keyid = get_new_keyid(toep)) < 0) {
-			return (ENOSPC);
-		}
-	} else {
-		keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
+	if ((keyid = get_new_keyid(toep)) < 0) {
+		return (ENOSPC);
 	}
 
 	wr = alloc_wrqe(len, &toep->ofld_txq->wrq);
@@ -530,9 +463,10 @@ tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
 	    F_FW_WR_ATOMIC);
 	kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
 	    V_FW_WR_FLOWID(toep->tid));
-	kwr->protocol = get_proto_ver(k_ctx->proto_ver);
-	kwr->mfs = htons(k_ctx->frag_size);
-	kwr->reneg_to_write_rx = k_ctx->l_p_key;
+	kwr->protocol = tls_proto_ver(tls);
+	kwr->mfs = htons(tls->params.max_frame_len);
+	kwr->reneg_to_write_rx = V_KEY_GET_LOC(direction == KTLS_TX ?
+	    KEY_WRITE_TX : KEY_WRITE_RX);
 
 	/* master command */
 	kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
@@ -549,12 +483,12 @@ tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
 	kctx = (struct tls_keyctx *)(kwr + 1);
 	memset(kctx, 0, kctxlen);
 
-	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
+	if (direction == KTLS_TX) {
 		tls_ofld->tx_key_addr = keyid;
-		prepare_txkey_wr(kctx, k_ctx);
-	} else if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
+		prepare_txkey_wr(kctx, tls);
+	} else {
 		tls_ofld->rx_key_addr = keyid;
-		prepare_rxkey_wr(kctx, k_ctx);
+		prepare_rxkey_wr(kctx, tls);
 	}
 
 	txsd = &toep->txsd[toep->txsd_pidx];
@@ -570,163 +504,6 @@ tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
 	return (0);
 }
 
-/* Store a key received from SSL in DDR. */
-static int
-program_key_context(struct tcpcb *tp, struct toepcb *toep,
-    struct tls_key_context *uk_ctx)
-{
-	struct adapter *sc = td_adapter(toep->td);
-	struct tls_ofld_info *tls_ofld = &toep->tls;
-	struct tls_key_context *k_ctx;
-	int error, key_offset;
-
-	if (tp->t_state != TCPS_ESTABLISHED) {
-		/*
-		 * XXX: Matches Linux driver, but not sure this is a
-		 * very appropriate error.
-		 */
-		return (ENOENT);
-	}
-
-	/* Stop timer on handshake completion */
-	tls_stop_handshake_timer(toep);
-
-	toep->flags &= ~TPF_FORCE_CREDITS;
-
-	CTR4(KTR_CXGBE, "%s: tid %d %s proto_ver %#x", __func__, toep->tid,
-	    G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX ? "KEY_WRITE_RX" :
-	    "KEY_WRITE_TX", uk_ctx->proto_ver);
-
-	if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
-	    ulp_mode(toep) != ULP_MODE_TLS)
-		return (EOPNOTSUPP);
-
-	/* Don't copy the 'tx' and 'rx' fields. */
-	k_ctx = &tls_ofld->k_ctx;
-	memcpy(&k_ctx->l_p_key, &uk_ctx->l_p_key,
-	    sizeof(*k_ctx) - offsetof(struct tls_key_context, l_p_key));
-
-	/* TLS version != 1.1 and !1.2 OR DTLS != 1.2 */
-	if (get_proto_ver(k_ctx->proto_ver) > DTLS_1_2_VERSION) {
-		if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
-			tls_ofld->rx_key_addr = -1;
-			t4_clear_rx_quiesce(toep);
-		} else {
-			tls_ofld->tx_key_addr = -1;
-		}
-		return (0);
-	}
-
-	if (k_ctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
-		k_ctx->iv_size = 4;
-		k_ctx->mac_first = 0;
-		k_ctx->hmac_ctrl = 0;
-	} else {
-		k_ctx->iv_size = 8; /* for CBC, iv is 16B, unit of 2B */
-		k_ctx->mac_first = 1;
-	}
-
-	tls_ofld->scmd0.seqno_numivs =
-		(V_SCMD_SEQ_NO_CTRL(3) |
-		 V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
-		 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
-		 V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
-		 V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
-		 V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
-		 V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
-		 V_SCMD_IV_SIZE(k_ctx->iv_size));
-
-	tls_ofld->scmd0.ivgen_hdrlen =
-		(V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
-		 V_SCMD_KEY_CTX_INLINE(0) |
-		 V_SCMD_TLS_FRAG_ENABLE(1));
-
-	tls_ofld->mac_length = k_ctx->mac_secret_size;
-
-	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
-		k_ctx->rx = uk_ctx->rx;
-		/* Dont initialize key for re-neg */
-		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
-			tls_ofld->rx_key_addr = -1;
-	} else {
-		k_ctx->tx = uk_ctx->tx;
-		/* Dont initialize key for re-neg */
-		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
-			tls_ofld->tx_key_addr = -1;
-	}
-
-	/* Flush pending data before new Tx key becomes active */
-	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
-		struct sockbuf *sb;
-
-		/* XXX: This might not drain everything. */
-		t4_push_frames(sc, toep, 0);
-		sb = &toep->inp->inp_socket->so_snd;
-		SOCKBUF_LOCK(sb);
-
-		/* XXX: This asserts that everything has been pushed. */
-		MPASS(sb->sb_sndptr == NULL || sb->sb_sndptr->m_next == NULL);
-		sb->sb_sndptr = NULL;
-		tls_ofld->sb_off = sbavail(sb);
-		SOCKBUF_UNLOCK(sb);
-		tls_ofld->tx_seq_no = 0;
-	}
-
-	if ((G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) ||
-	    (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR)) {
-
-		/*
-		 * XXX: The userland library sets tx_key_info_size, not
-		 * rx_key_info_size.
-		 */
-		k_ctx->rx_key_info_size = k_ctx->tx_key_info_size;
-
-		error = tls_program_key_id(toep, k_ctx);
-		if (error) {
-			/* XXX: Only clear quiesce for KEY_WRITE_RX? */
-			t4_clear_rx_quiesce(toep);
-			return (error);
-		}
-	}
-
-	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
-		/*
-		 * RX key tags are an index into the key portion of MA
-		 * memory stored as an offset from the base address in
-		 * units of 64 bytes.
-		 */
-		key_offset = tls_ofld->rx_key_addr - sc->vres.key.start;
-		t4_set_tls_keyid(toep, key_offset / 64);
-		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
-				 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
-				 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
-						V_TF_TLS_CONTROL(1) |
-						V_TF_TLS_ACTIVE(1) |
-						V_TF_TLS_ENABLE(1))));
-		t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
-				 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
-				 V_TCB_TLS_SEQ(0));
-		t4_clear_rx_quiesce(toep);
-
-		toep->flags |= TPF_TLS_RECEIVE;
-	} else {
-		unsigned short pdus_per_ulp;
-
-		if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE)
-			tls_ofld->tx_key_addr = 1;
-
-		tls_ofld->fcplenmax = get_tp_plen_max(tls_ofld);
-		tls_ofld->expn_per_ulp = tls_expansion_size(toep,
-				tls_ofld->fcplenmax, 1, &pdus_per_ulp);
-		tls_ofld->pdus_per_ulp = pdus_per_ulp;
-		tls_ofld->adjusted_plen = tls_ofld->pdus_per_ulp *
-			((tls_ofld->expn_per_ulp/tls_ofld->pdus_per_ulp) +
-			 tls_ofld->k_ctx.frag_size);
-	}
-
-	return (0);
-}
-
 /*
  * In some cases a client connection can hang without sending the
  * ServerHelloDone message from the NIC to the host.  Send a dummy
@@ -791,191 +568,12 @@ tls_stop_handshake_timer(struct toepcb *toep)
 	callout_stop(&tls_ofld->handshake_timer);
 }
 
-int
-t4_ctloutput_tls(struct socket *so, struct sockopt *sopt)
-{
-	struct tls_key_context uk_ctx;
-	struct inpcb *inp;
-	struct tcpcb *tp;
-	struct toepcb *toep;
-	int error, optval;
-
-	error = 0;
-	if (sopt->sopt_dir == SOPT_SET &&
-	    sopt->sopt_name == TCP_TLSOM_SET_TLS_CONTEXT) {
-		error = sooptcopyin(sopt, &uk_ctx, sizeof(uk_ctx),
-		    sizeof(uk_ctx));
-		if (error)
-			return (error);
-	}
-
-	inp = sotoinpcb(so);
-	KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
-	INP_WLOCK(inp);
-	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
-		INP_WUNLOCK(inp);
-		return (ECONNRESET);
-	}
-	tp = intotcpcb(inp);
-	toep = tp->t_toe;
-	switch (sopt->sopt_dir) {
-	case SOPT_SET:
-		switch (sopt->sopt_name) {
-		case TCP_TLSOM_SET_TLS_CONTEXT:
-			if (toep->tls.mode == TLS_MODE_KTLS)
-				error = EINVAL;
-			else {
-				error = program_key_context(tp, toep, &uk_ctx);
-				if (error == 0)
-					toep->tls.mode = TLS_MODE_TLSOM;
-			}
-			INP_WUNLOCK(inp);
-			break;
-		case TCP_TLSOM_CLR_TLS_TOM:
-			if (toep->tls.mode == TLS_MODE_KTLS)
-				error = EINVAL;
-			else if (ulp_mode(toep) == ULP_MODE_TLS) {
-				CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
-				    __func__, toep->tid);
-				tls_clr_ofld_mode(toep);
-			} else
-				error = EOPNOTSUPP;
-			INP_WUNLOCK(inp);
-			break;
-		case TCP_TLSOM_CLR_QUIES:
-			if (toep->tls.mode == TLS_MODE_KTLS)
-				error = EINVAL;
-			else if (ulp_mode(toep) == ULP_MODE_TLS) {
-				CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
-				    __func__, toep->tid);
-				tls_clr_quiesce(toep);
-			} else
-				error = EOPNOTSUPP;
-			INP_WUNLOCK(inp);
-			break;
-		default:
-			INP_WUNLOCK(inp);
-			error = EOPNOTSUPP;
-			break;
-		}
-		break;
-	case SOPT_GET:
-		switch (sopt->sopt_name) {
-		case TCP_TLSOM_GET_TLS_TOM:
-			/*
-			 * TLS TX is permitted on any TOE socket, but
-			 * TLS RX requires a TLS ULP mode.
-			 */
-			optval = TLS_TOM_NONE;
-			if (can_tls_offload(td_adapter(toep->td)) &&
-			    toep->tls.mode != TLS_MODE_KTLS) {
-				switch (ulp_mode(toep)) {
-				case ULP_MODE_NONE:
-				case ULP_MODE_TCPDDP:
-					optval = TLS_TOM_TXONLY;
-					break;
-				case ULP_MODE_TLS:
-					optval = TLS_TOM_BOTH;
-					break;
-				}
-			}
-			CTR3(KTR_CXGBE, "%s: tid %d GET_TLS_TOM = %d",
-			    __func__, toep->tid, optval);
-			INP_WUNLOCK(inp);
-			error = sooptcopyout(sopt, &optval, sizeof(optval));
-			break;
-		default:
-			INP_WUNLOCK(inp);
-			error = EOPNOTSUPP;
-			break;
-		}
-		break;
-	}
-	return (error);
-}
-
-#ifdef KERN_TLS
-static void
-init_ktls_key_context(struct ktls_session *tls, struct tls_key_context *k_ctx,
-    int direction)
-{
-	struct auth_hash *axf;
-	u_int key_info_size, mac_key_size;
-	char *hash, *key;
-
-	k_ctx->l_p_key = V_KEY_GET_LOC(direction == KTLS_TX ? KEY_WRITE_TX :
-	    KEY_WRITE_RX);
-	k_ctx->proto_ver = tls->params.tls_vmajor << 8 | tls->params.tls_vminor;
-	k_ctx->cipher_secret_size = tls->params.cipher_key_len;
-	key_info_size = sizeof(struct tx_keyctx_hdr) +
-	    k_ctx->cipher_secret_size;
-	if (direction == KTLS_TX)
-		key = k_ctx->tx.key;
-	else
-		key = k_ctx->rx.key;
-	memcpy(key, tls->params.cipher_key, tls->params.cipher_key_len);
-	hash = key + tls->params.cipher_key_len;
-	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
-		k_ctx->state.auth_mode = SCMD_AUTH_MODE_GHASH;
-		k_ctx->state.enc_mode = SCMD_CIPH_MODE_AES_GCM;
-		k_ctx->iv_size = 4;
-		k_ctx->mac_first = 0;
-		k_ctx->hmac_ctrl = SCMD_HMAC_CTRL_NOP;
-		key_info_size += GMAC_BLOCK_LEN;
-		k_ctx->mac_secret_size = 0;
-		if (direction == KTLS_TX)
-			memcpy(k_ctx->tx.salt, tls->params.iv, SALT_SIZE);
-		else
-			memcpy(k_ctx->rx.salt, tls->params.iv, SALT_SIZE);
-		t4_init_gmac_hash(tls->params.cipher_key,
-		    tls->params.cipher_key_len, hash);
-	} else {
-		switch (tls->params.auth_algorithm) {
-		case CRYPTO_SHA1_HMAC:
-			axf = &auth_hash_hmac_sha1;
-			mac_key_size = SHA1_HASH_LEN;
-			k_ctx->state.auth_mode = SCMD_AUTH_MODE_SHA1;
-			break;
-		case CRYPTO_SHA2_256_HMAC:
-			axf = &auth_hash_hmac_sha2_256;
-			mac_key_size = SHA2_256_HASH_LEN;
*** 1230 LINES SKIPPED ***

