svn commit: r367386 - in head: share/man/man4 sys/amd64/conf sys/conf sys/dev/qat sys/modules sys/modules/qat
Mark Johnston
markj at FreeBSD.org
Thu Nov 5 15:55:24 UTC 2020
Author: markj
Date: Thu Nov 5 15:55:23 2020
New Revision: 367386
URL: https://svnweb.freebsd.org/changeset/base/367386
Log:
Add qat(4)
This provides an OpenCrypto driver for Intel QuickAssist devices. The
driver was initially ported from NetBSD and comes with a few
improvements:
- support for GMAC/AES-GCM, AES-CTR and AES-XTS, and support for
SHA/HMAC-authenticated encryption
- support for detaching the driver
- various bug fixes
- DH895X support
Discussed with: jhb
MFC after: 3 days
Sponsored by: Rubicon Communications, LLC (Netgate)
Differential Revision: https://reviews.freebsd.org/D26963
Added:
head/share/man/man4/qat.4 (contents, props changed)
head/sys/dev/qat/
head/sys/dev/qat/qat.c (contents, props changed)
head/sys/dev/qat/qat_ae.c (contents, props changed)
head/sys/dev/qat/qat_aevar.h (contents, props changed)
head/sys/dev/qat/qat_c2xxx.c (contents, props changed)
head/sys/dev/qat/qat_c2xxxreg.h (contents, props changed)
head/sys/dev/qat/qat_c3xxx.c (contents, props changed)
head/sys/dev/qat/qat_c3xxxreg.h (contents, props changed)
head/sys/dev/qat/qat_c62x.c (contents, props changed)
head/sys/dev/qat/qat_c62xreg.h (contents, props changed)
head/sys/dev/qat/qat_d15xx.c (contents, props changed)
head/sys/dev/qat/qat_d15xxreg.h (contents, props changed)
head/sys/dev/qat/qat_dh895xcc.c (contents, props changed)
head/sys/dev/qat/qat_dh895xccreg.h (contents, props changed)
head/sys/dev/qat/qat_hw15.c (contents, props changed)
head/sys/dev/qat/qat_hw15reg.h (contents, props changed)
head/sys/dev/qat/qat_hw15var.h (contents, props changed)
head/sys/dev/qat/qat_hw17.c (contents, props changed)
head/sys/dev/qat/qat_hw17reg.h (contents, props changed)
head/sys/dev/qat/qat_hw17var.h (contents, props changed)
head/sys/dev/qat/qatreg.h (contents, props changed)
head/sys/dev/qat/qatvar.h (contents, props changed)
head/sys/modules/qat/
head/sys/modules/qat/Makefile (contents, props changed)
Modified:
head/share/man/man4/Makefile
head/sys/amd64/conf/NOTES
head/sys/conf/files.x86
head/sys/modules/Makefile
Modified: head/share/man/man4/Makefile
==============================================================================
--- head/share/man/man4/Makefile Thu Nov 5 15:27:38 2020 (r367385)
+++ head/share/man/man4/Makefile Thu Nov 5 15:55:23 2020 (r367386)
@@ -431,6 +431,7 @@ MAN= aac.4 \
pty.4 \
puc.4 \
pwmc.4 \
+ ${_qat.4} \
${_qlxge.4} \
${_qlxgb.4} \
${_qlxgbe.4} \
@@ -823,6 +824,7 @@ _nvram.4= nvram.4
_ossl.4= ossl.4
_padlock.4= padlock.4
_pchtherm.4= pchtherm.4
+_qat.4= qat.4
_rr232x.4= rr232x.4
_speaker.4= speaker.4
_spkr.4= spkr.4
Added: head/share/man/man4/qat.4
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ head/share/man/man4/qat.4 Thu Nov 5 15:55:23 2020 (r367386)
@@ -0,0 +1,99 @@
+.\"-
+.\" Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 5, 2020
+.Dt QAT 4
+.Os
+.Sh NAME
+.Nm qat
+.Nd Intel QuickAssist Technology (QAT) driver
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device crypto"
+.Cd "device cryptodev"
+.Cd "device qat"
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following lines in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+qat_load="YES"
+qat_c2xxxfw_load="YES"
+qat_c3xxxfw_load="YES"
+qat_c62xfw_load="YES"
+qat_d15xxfw_load="YES"
+qat_dh895xccfw_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+driver implements
+.Xr crypto 4
+support for some of the cryptographic acceleration functions of the Intel
+QuickAssist device.
+The
+.Nm
+driver supports the QAT devices integrated with Atom C2000 and C3000 and Xeon
+C620 and D-1500 chipsets, and the Intel QAT Adapter 8950.
+It can accelerate AES in CBC, CTR, XTS (except for the C2000) and GCM modes,
+and can perform authenticated encryption combining the CBC, CTR and XTS modes
+with SHA1-HMAC and SHA2-HMAC.
+The
+.Nm
+driver can also compute SHA1 and SHA2 digests.
+.Sh SEE ALSO
+.Xr crypto 4 ,
+.Xr ipsec 4 ,
+.Xr pci 4 ,
+.Xr random 4 ,
+.Xr crypto 7 ,
+.Xr crypto 9
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Fx 13.0 .
+.Sh AUTHORS
+The
+.Nm
+driver was written for
+.Nx
+by
+.An Hikaru Abe Aq Mt hikaru@iij.ad.jp
+and ported to
+.Fx
+by
+.An Mark Johnston Aq Mt markj@FreeBSD.org .
+.Sh BUGS
+Some Atom C2000 QAT devices have two acceleration engines instead of one.
+The
+.Nm
+driver currently misbehaves when both are enabled and thus does not enable
+the second acceleration engine if one is present.
Modified: head/sys/amd64/conf/NOTES
==============================================================================
--- head/sys/amd64/conf/NOTES Thu Nov 5 15:27:38 2020 (r367385)
+++ head/sys/amd64/conf/NOTES Thu Nov 5 15:55:23 2020 (r367386)
@@ -471,6 +471,10 @@ device vmd_bus # bus for VMD children
device pmspcv
#
+# Intel QuickAssist
+device qat
+
+#
# SafeNet crypto driver: can be moved to the MI NOTES as soon as
# it's tested on a big-endian machine
#
Modified: head/sys/conf/files.x86
==============================================================================
--- head/sys/conf/files.x86 Thu Nov 5 15:27:38 2020 (r367385)
+++ head/sys/conf/files.x86 Thu Nov 5 15:55:23 2020 (r367386)
@@ -291,6 +291,15 @@ dev/isci/scil/scif_sas_task_request_state_handlers.c
dev/isci/scil/scif_sas_task_request_states.c optional isci
dev/isci/scil/scif_sas_timer.c optional isci
dev/itwd/itwd.c optional itwd
+dev/qat/qat.c optional qat
+dev/qat/qat_ae.c optional qat
+dev/qat/qat_c2xxx.c optional qat
+dev/qat/qat_c3xxx.c optional qat
+dev/qat/qat_c62x.c optional qat
+dev/qat/qat_d15xx.c optional qat
+dev/qat/qat_dh895xcc.c optional qat
+dev/qat/qat_hw15.c optional qat
+dev/qat/qat_hw17.c optional qat
libkern/x86/crc32_sse42.c standard
#
# x86 shared code between IA32 and AMD64 architectures
Added: head/sys/dev/qat/qat.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ head/sys/dev/qat/qat.c Thu Nov 5 15:55:23 2020 (r367386)
@@ -0,0 +1,2140 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/firmware.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/md5.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+
+#include "cryptodev_if.h"
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qatvar.h"
+#include "qat_aevar.h"
+
+extern struct qat_hw qat_hw_c2xxx;
+extern struct qat_hw qat_hw_c3xxx;
+extern struct qat_hw qat_hw_c62x;
+extern struct qat_hw qat_hw_d15xx;
+extern struct qat_hw qat_hw_dh895xcc;
+
+#define PCI_VENDOR_INTEL 0x8086
+#define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS 0x1f18
+#define PCI_PRODUCT_INTEL_C3K_QAT 0x19e2
+#define PCI_PRODUCT_INTEL_C3K_QAT_VF 0x19e3
+#define PCI_PRODUCT_INTEL_C620_QAT 0x37c8
+#define PCI_PRODUCT_INTEL_C620_QAT_VF 0x37c9
+#define PCI_PRODUCT_INTEL_XEOND_QAT 0x6f54
+#define PCI_PRODUCT_INTEL_XEOND_QAT_VF 0x6f55
+#define PCI_PRODUCT_INTEL_DH895XCC_QAT 0x0435
+#define PCI_PRODUCT_INTEL_DH895XCC_QAT_VF 0x0443
+
+static const struct qat_product {
+ uint16_t qatp_vendor;
+ uint16_t qatp_product;
+ const char *qatp_name;
+ enum qat_chip_type qatp_chip;
+ const struct qat_hw *qatp_hw;
+} qat_products[] = {
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
+ "Intel C2000 QuickAssist PF",
+ QAT_CHIP_C2XXX, &qat_hw_c2xxx },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
+ "Intel C3000 QuickAssist PF",
+ QAT_CHIP_C3XXX, &qat_hw_c3xxx },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
+ "Intel C620/Xeon D-2100 QuickAssist PF",
+ QAT_CHIP_C62X, &qat_hw_c62x },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
+ "Intel Xeon D-1500 QuickAssist PF",
+ QAT_CHIP_D15XX, &qat_hw_d15xx },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH895XCC_QAT,
+ "Intel 8950 QuickAssist PCIe Adapter PF",
+ QAT_CHIP_DH895XCC, &qat_hw_dh895xcc },
+ { 0, 0, NULL, 0, NULL },
+};
+
+/* Hash Algorithm specific structure */
+
+/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
+ 0x67, 0x45, 0x23, 0x01,
+ 0xef, 0xcd, 0xab, 0x89,
+ 0x98, 0xba, 0xdc, 0xfe,
+ 0x10, 0x32, 0x54, 0x76,
+ 0xc3, 0xd2, 0xe1, 0xf0
+};
+
+/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
+ 0x6a, 0x09, 0xe6, 0x67,
+ 0xbb, 0x67, 0xae, 0x85,
+ 0x3c, 0x6e, 0xf3, 0x72,
+ 0xa5, 0x4f, 0xf5, 0x3a,
+ 0x51, 0x0e, 0x52, 0x7f,
+ 0x9b, 0x05, 0x68, 0x8c,
+ 0x1f, 0x83, 0xd9, 0xab,
+ 0x5b, 0xe0, 0xcd, 0x19
+};
+
+/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
+ 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
+ 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
+ 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+ 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
+ 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
+ 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
+ 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
+ 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
+};
+
+/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
+ 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
+ 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
+ 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
+ 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
+ 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
+ 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+ 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
+ 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
+};
+
+static const struct qat_sym_hash_alg_info sha1_info = {
+ .qshai_digest_len = QAT_HASH_SHA1_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_SHA1_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_SHA1_STATE_SIZE,
+ .qshai_init_state = sha1_initial_state,
+ .qshai_sah = &auth_hash_hmac_sha1,
+ .qshai_state_offset = 0,
+ .qshai_state_word = 4,
+};
+
+static const struct qat_sym_hash_alg_info sha256_info = {
+ .qshai_digest_len = QAT_HASH_SHA256_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_SHA256_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_SHA256_STATE_SIZE,
+ .qshai_init_state = sha256_initial_state,
+ .qshai_sah = &auth_hash_hmac_sha2_256,
+ .qshai_state_offset = offsetof(SHA256_CTX, state),
+ .qshai_state_word = 4,
+};
+
+static const struct qat_sym_hash_alg_info sha384_info = {
+ .qshai_digest_len = QAT_HASH_SHA384_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_SHA384_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_SHA384_STATE_SIZE,
+ .qshai_init_state = sha384_initial_state,
+ .qshai_sah = &auth_hash_hmac_sha2_384,
+ .qshai_state_offset = offsetof(SHA384_CTX, state),
+ .qshai_state_word = 8,
+};
+
+static const struct qat_sym_hash_alg_info sha512_info = {
+ .qshai_digest_len = QAT_HASH_SHA512_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_SHA512_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_SHA512_STATE_SIZE,
+ .qshai_init_state = sha512_initial_state,
+ .qshai_sah = &auth_hash_hmac_sha2_512,
+ .qshai_state_offset = offsetof(SHA512_CTX, state),
+ .qshai_state_word = 8,
+};
+
+static const struct qat_sym_hash_alg_info aes_gcm_info = {
+ .qshai_digest_len = QAT_HASH_AES_GCM_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_AES_GCM_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_AES_GCM_STATE_SIZE,
+ .qshai_sah = &auth_hash_nist_gmac_aes_128,
+};
+
+/* Hash QAT specific structures */
+
+static const struct qat_sym_hash_qat_info sha1_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_SHA1,
+ .qshqi_auth_counter = QAT_HASH_SHA1_BLOCK_SIZE,
+ .qshqi_state1_len = HW_SHA1_STATE1_SZ,
+ .qshqi_state2_len = HW_SHA1_STATE2_SZ,
+};
+
+static const struct qat_sym_hash_qat_info sha256_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_SHA256,
+ .qshqi_auth_counter = QAT_HASH_SHA256_BLOCK_SIZE,
+ .qshqi_state1_len = HW_SHA256_STATE1_SZ,
+ .qshqi_state2_len = HW_SHA256_STATE2_SZ
+};
+
+static const struct qat_sym_hash_qat_info sha384_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_SHA384,
+ .qshqi_auth_counter = QAT_HASH_SHA384_BLOCK_SIZE,
+ .qshqi_state1_len = HW_SHA384_STATE1_SZ,
+ .qshqi_state2_len = HW_SHA384_STATE2_SZ
+};
+
+static const struct qat_sym_hash_qat_info sha512_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_SHA512,
+ .qshqi_auth_counter = QAT_HASH_SHA512_BLOCK_SIZE,
+ .qshqi_state1_len = HW_SHA512_STATE1_SZ,
+ .qshqi_state2_len = HW_SHA512_STATE2_SZ
+};
+
+static const struct qat_sym_hash_qat_info aes_gcm_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_GALOIS_128,
+ .qshqi_auth_counter = QAT_HASH_AES_GCM_BLOCK_SIZE,
+ .qshqi_state1_len = HW_GALOIS_128_STATE1_SZ,
+ .qshqi_state2_len =
+ HW_GALOIS_H_SZ + HW_GALOIS_LEN_A_SZ + HW_GALOIS_E_CTR0_SZ,
+};
+
+static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
+ [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
+ [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
+ [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
+ [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
+ [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
+};
+
+static const struct qat_product *qat_lookup(device_t);
+static int qat_probe(device_t);
+static int qat_attach(device_t);
+static int qat_init(struct device *);
+static int qat_start(struct device *);
+static int qat_detach(device_t);
+
+static int qat_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp);
+static void qat_freesession(device_t dev, crypto_session_t cses);
+
+static int qat_setup_msix_intr(struct qat_softc *);
+
+static void qat_etr_init(struct qat_softc *);
+static void qat_etr_deinit(struct qat_softc *);
+static void qat_etr_bank_init(struct qat_softc *, int);
+static void qat_etr_bank_deinit(struct qat_softc *sc, int);
+
+static void qat_etr_ap_bank_init(struct qat_softc *);
+static void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
+static void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
+ uint32_t, int);
+static void qat_etr_ap_bank_setup_ring(struct qat_softc *,
+ struct qat_ring *);
+static int qat_etr_verify_ring_size(uint32_t, uint32_t);
+
+static int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
+ struct qat_ring *);
+static void qat_etr_bank_intr(void *);
+
+static void qat_arb_update(struct qat_softc *, struct qat_bank *);
+
+static struct qat_sym_cookie *qat_crypto_alloc_sym_cookie(
+ struct qat_crypto_bank *);
+static void qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
+ struct qat_sym_cookie *);
+static int qat_crypto_setup_ring(struct qat_softc *,
+ struct qat_crypto_bank *);
+static int qat_crypto_bank_init(struct qat_softc *,
+ struct qat_crypto_bank *);
+static int qat_crypto_init(struct qat_softc *);
+static void qat_crypto_deinit(struct qat_softc *);
+static int qat_crypto_start(struct qat_softc *);
+static void qat_crypto_stop(struct qat_softc *);
+static int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
+
+static MALLOC_DEFINE(M_QAT, "qat", "Intel QAT driver");
+
+static const struct qat_product *
+qat_lookup(device_t dev)
+{
+ const struct qat_product *qatp;
+
+ for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
+ if (pci_get_vendor(dev) == qatp->qatp_vendor &&
+ pci_get_device(dev) == qatp->qatp_product)
+ return qatp;
+ }
+ return NULL;
+}
+
+static int
+qat_probe(device_t dev)
+{
+ const struct qat_product *prod;
+
+ prod = qat_lookup(dev);
+ if (prod != NULL) {
+ device_set_desc(dev, prod->qatp_name);
+ return BUS_PROBE_DEFAULT;
+ }
+ return ENXIO;
+}
+
+static int
+qat_attach(device_t dev)
+{
+ struct qat_softc *sc = device_get_softc(dev);
+ const struct qat_product *qatp;
+ bus_size_t msixtbl_offset;
+ int bar, count, error, i, msixoff, msixtbl_bar;
+
+ sc->sc_dev = dev;
+ sc->sc_rev = pci_get_revid(dev);
+
+ qatp = qat_lookup(dev);
+ memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
+
+ /* Determine active accelerators and engines */
+ sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
+ sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
+
+ sc->sc_accel_num = 0;
+ for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
+ if (sc->sc_accel_mask & (1 << i))
+ sc->sc_accel_num++;
+ }
+ sc->sc_ae_num = 0;
+ for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
+ if (sc->sc_ae_mask & (1 << i))
+ sc->sc_ae_num++;
+ }
+
+ if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
+ device_printf(sc->sc_dev, "couldn't find acceleration");
+ goto fail;
+ }
+
+ MPASS(sc->sc_accel_num <= MAX_NUM_ACCEL);
+ MPASS(sc->sc_ae_num <= MAX_NUM_AE);
+
+ /* Determine SKU and capabilities */
+ sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
+ sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
+ sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);
+
+ /* Map BARs */
+ msixtbl_bar = 0;
+ msixtbl_offset = 0;
+ if (pci_find_cap(dev, PCIY_MSIX, &msixoff) == 0) {
+ uint32_t msixtbl;
+ msixtbl = pci_read_config(dev, msixoff + PCIR_MSIX_TABLE, 4);
+ msixtbl_offset = msixtbl & ~PCIM_MSIX_BIR_MASK;
+ msixtbl_bar = PCIR_BAR(msixtbl & PCIM_MSIX_BIR_MASK);
+ }
+
+ i = 0;
+ if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
+ MPASS(sc->sc_hw.qhw_sram_bar_id == 0);
+ uint32_t fusectl = pci_read_config(dev, FUSECTL_REG, 4);
+ /* Skip SRAM BAR */
+ i = (fusectl & FUSECTL_MASK) ? 1 : 0;
+ }
+ for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) {
+ uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4);
+ if (val == 0 || !PCI_BAR_MEM(val))
+ continue;
+
+ sc->sc_rid[i] = PCIR_BAR(bar);
+ sc->sc_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->sc_rid[i], RF_ACTIVE);
+ if (sc->sc_res[i] == NULL) {
+ device_printf(dev, "couldn't map BAR %d\n", bar);
+ goto fail;
+ }
+
+ sc->sc_csrt[i] = rman_get_bustag(sc->sc_res[i]);
+ sc->sc_csrh[i] = rman_get_bushandle(sc->sc_res[i]);
+
+ i++;
+ if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64)
+ bar++;
+ }
+
+ pci_enable_busmaster(dev);
+
+ count = sc->sc_hw.qhw_num_banks + 1;
+ if (pci_msix_count(dev) < count) {
+ device_printf(dev, "insufficient MSI-X vectors (%d vs. %d)\n",
+ pci_msix_count(dev), count);
+ goto fail;
+ }
+ error = pci_alloc_msix(dev, &count);
+ if (error != 0) {
+ device_printf(dev, "failed to allocate MSI-X vectors\n");
+ goto fail;
+ }
+
+ error = qat_init(dev);
+ if (error == 0)
+ return 0;
+
+fail:
+ qat_detach(dev);
+ return ENXIO;
+}
+
+static int
+qat_init(device_t dev)
+{
+ struct qat_softc *sc = device_get_softc(dev);
+ int error;
+
+ qat_etr_init(sc);
+
+ if (sc->sc_hw.qhw_init_admin_comms != NULL &&
+ (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "Could not initialize admin comms: %d\n", error);
+ return error;
+ }
+
+ if (sc->sc_hw.qhw_init_arb != NULL &&
+ (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "Could not initialize hw arbiter: %d\n", error);
+ return error;
+ }
+
+ error = qat_ae_init(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not initialize Acceleration Engine: %d\n", error);
+ return error;
+ }
+
+ error = qat_aefw_load(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not load firmware: %d\n", error);
+ return error;
+ }
+
+ error = qat_setup_msix_intr(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not setup interrupts: %d\n", error);
+ return error;
+ }
+
+ sc->sc_hw.qhw_enable_intr(sc);
+
+ error = qat_crypto_init(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not initialize service: %d\n", error);
+ return error;
+ }
+
+ if (sc->sc_hw.qhw_enable_error_correction != NULL)
+ sc->sc_hw.qhw_enable_error_correction(sc);
+
+ if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
+ (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "Could not initialize watchdog timer: %d\n", error);
+ return error;
+ }
+
+ error = qat_start(dev);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not start: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+qat_start(device_t dev)
+{
+ struct qat_softc *sc = device_get_softc(dev);
+ int error;
+
+ error = qat_ae_start(sc);
+ if (error)
+ return error;
+
+ if (sc->sc_hw.qhw_send_admin_init != NULL &&
+ (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
+ return error;
+ }
+
+ error = qat_crypto_start(sc);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int
+qat_detach(device_t dev)
+{
+ struct qat_softc *sc;
+ int bar, i;
+
+ sc = device_get_softc(dev);
+
+ qat_crypto_stop(sc);
+ qat_crypto_deinit(sc);
+ qat_aefw_unload(sc);
+
+ if (sc->sc_etr_banks != NULL) {
+ for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
+ struct qat_bank *qb = &sc->sc_etr_banks[i];
+
+ if (qb->qb_ih_cookie != NULL)
+ (void)bus_teardown_intr(dev, qb->qb_ih,
+ qb->qb_ih_cookie);
+ if (qb->qb_ih != NULL)
+ (void)bus_release_resource(dev, SYS_RES_IRQ,
+ i + 1, qb->qb_ih);
+ }
+ }
+ if (sc->sc_ih_cookie != NULL) {
+ (void)bus_teardown_intr(dev, sc->sc_ih, sc->sc_ih_cookie);
+ sc->sc_ih_cookie = NULL;
+ }
+ if (sc->sc_ih != NULL) {
+ (void)bus_release_resource(dev, SYS_RES_IRQ, i + 1, sc->sc_ih);
+ sc->sc_ih = NULL;
+ }
+ pci_release_msi(dev);
+
+ qat_etr_deinit(sc);
+
+ for (bar = 0; bar < MAX_BARS; bar++) {
+ if (sc->sc_res[bar] != NULL) {
+ (void)bus_release_resource(dev, SYS_RES_MEMORY,
+ sc->sc_rid[bar], sc->sc_res[bar]);
+ sc->sc_res[bar] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+void *
+qat_alloc_mem(size_t size)
+{
+ return (malloc(size, M_QAT, M_WAITOK | M_ZERO));
+}
+
+void
+qat_free_mem(void *ptr)
+{
+ free(ptr, M_QAT);
+}
+
+static void
+qat_alloc_dmamem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
+ int error)
+{
+ struct qat_dmamem *qdm;
+
+ if (error != 0)
+ return;
+
+ KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
+ qdm = arg;
+ qdm->qdm_dma_seg = segs[0];
+}
+
+int
+qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
+ int nseg, bus_size_t size, bus_size_t alignment)
+{
+ int error;
+
+ KASSERT(qdm->qdm_dma_vaddr == NULL,
+ ("%s: DMA memory descriptor in use", __func__));
+
+ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
+ alignment, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ nseg, /* nsegments */
+ size, /* maxsegsize */
+ BUS_DMA_COHERENT, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &qdm->qdm_dma_tag);
+ if (error != 0)
+ return error;
+
+ error = bus_dmamem_alloc(qdm->qdm_dma_tag, &qdm->qdm_dma_vaddr,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &qdm->qdm_dma_map);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "couldn't allocate dmamem, error = %d\n", error);
+ goto fail_0;
+ }
+
+ error = bus_dmamap_load(qdm->qdm_dma_tag, qdm->qdm_dma_map,
+ qdm->qdm_dma_vaddr, size, qat_alloc_dmamem_cb, qdm,
+ BUS_DMA_NOWAIT);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "couldn't load dmamem map, error = %d\n", error);
+ goto fail_1;
+ }
+
+ return 0;
+fail_1:
+ bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map);
+fail_0:
+ bus_dma_tag_destroy(qdm->qdm_dma_tag);
+ return error;
+}
+
+void
+qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
+{
+ if (qdm->qdm_dma_tag != NULL) {
+ bus_dmamap_unload(qdm->qdm_dma_tag, qdm->qdm_dma_map);
+ bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr,
+ qdm->qdm_dma_map);
+ bus_dma_tag_destroy(qdm->qdm_dma_tag);
+ explicit_bzero(qdm, sizeof(*qdm));
+ }
+}
+
+static int
+qat_setup_msix_intr(struct qat_softc *sc)
+{
+ device_t dev;
+ int error, i, rid;
+
+ dev = sc->sc_dev;
+
+ for (i = 1; i <= sc->sc_hw.qhw_num_banks; i++) {
+ struct qat_bank *qb = &sc->sc_etr_banks[i - 1];
+
+ rid = i;
+ qb->qb_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE);
+ if (qb->qb_ih == NULL) {
+ device_printf(dev,
+ "failed to allocate bank intr resource\n");
+ return ENXIO;
+ }
+ error = bus_setup_intr(dev, qb->qb_ih,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_etr_bank_intr, qb,
+ &qb->qb_ih_cookie);
+ if (error != 0) {
+ device_printf(dev, "failed to set up bank intr\n");
+ return error;
+ }
+ error = bus_bind_intr(dev, qb->qb_ih, (i - 1) % mp_ncpus);
+ if (error != 0)
+ device_printf(dev, "failed to bind intr %d\n", i);
+ }
+
+ rid = i;
+ sc->sc_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE);
+ if (sc->sc_ih == NULL)
+ return ENXIO;
+ error = bus_setup_intr(dev, sc->sc_ih, INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, qat_ae_cluster_intr, sc, &sc->sc_ih_cookie);
+
+ return error;
+}
+
+static void
+qat_etr_init(struct qat_softc *sc)
+{
+ int i;
+
+ sc->sc_etr_banks = qat_alloc_mem(
+ sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);
+
+ for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
+ qat_etr_bank_init(sc, i);
+
+ if (sc->sc_hw.qhw_num_ap_banks) {
+ sc->sc_etr_ap_banks = qat_alloc_mem(
+ sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
+ qat_etr_ap_bank_init(sc);
+ }
+}
+
+static void
+qat_etr_deinit(struct qat_softc *sc)
+{
+ int i;
+
+ if (sc->sc_etr_banks != NULL) {
+ for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
+ qat_etr_bank_deinit(sc, i);
+ qat_free_mem(sc->sc_etr_banks);
+ sc->sc_etr_banks = NULL;
+ }
+ if (sc->sc_etr_ap_banks != NULL) {
+ qat_free_mem(sc->sc_etr_ap_banks);
+ sc->sc_etr_ap_banks = NULL;
+ }
+}
+
+static void
+qat_etr_bank_init(struct qat_softc *sc, int bank)
+{
+ struct qat_bank *qb = &sc->sc_etr_banks[bank];
+ int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;
+
+ MPASS(bank < sc->sc_hw.qhw_num_banks);
+
+ mtx_init(&qb->qb_bank_mtx, "qb bank", NULL, MTX_DEF);
+
+ qb->qb_sc = sc;
+ qb->qb_bank = bank;
+ qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
+
+ /* Clean CSRs for all rings within the bank */
+ for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
+ struct qat_ring *qr = &qb->qb_et_rings[i];
+
+ qat_etr_bank_ring_write_4(sc, bank, i,
+ ETR_RING_CONFIG, 0);
+ qat_etr_bank_ring_base_write_8(sc, bank, i, 0);
+
+ if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
+ qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
+ } else if (sc->sc_hw.qhw_tx_rings_mask &
+ (1 << (i - tx_rx_gap))) {
+ /* Share inflight counter with rx and tx */
+ qr->qr_inflight =
+ qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
+ }
+ }
+
+ if (sc->sc_hw.qhw_init_etr_intr != NULL) {
+ sc->sc_hw.qhw_init_etr_intr(sc, bank);
+ } else {
+ /* common code in qat 1.7 */
+ qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
+ ETR_INT_REG_CLEAR_MASK);
+ for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
+ ETR_RINGS_PER_INT_SRCSEL; i++) {
+ qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
+ (i * ETR_INT_SRCSEL_NEXT_OFFSET),
+ ETR_INT_SRCSEL_MASK);
+ }
+ }
+}
+
+static void
+qat_etr_bank_deinit(struct qat_softc *sc, int bank)
+{
+ struct qat_bank *qb;
+ struct qat_ring *qr;
+ int i;
+
+ qb = &sc->sc_etr_banks[bank];
+ for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
+ if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
+ qr = &qb->qb_et_rings[i];
+ qat_free_mem(qr->qr_inflight);
+ }
+ }
+}
+
+static void
+qat_etr_ap_bank_init(struct qat_softc *sc)
+{
+ int ap_bank;
+
+ for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
+ struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];
+
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
+ ETR_AP_NF_MASK_INIT);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
+ ETR_AP_NE_MASK_INIT);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);
+
+ memset(qab, 0, sizeof(*qab));
+ }
+}
+
+static void
+qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
+{
+ if (set_mask)
+ *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
+ else
+ *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-all
mailing list