svn commit: r361467 - in stable/11: share/man/man4 sys/contrib/ena-com sys/contrib/ena-com/ena_defs sys/dev/ena sys/modules/ena

Marcin Wojtas mw at FreeBSD.org
Mon May 25 17:41:21 UTC 2020


Author: mw
Date: Mon May 25 17:41:20 2020
New Revision: 361467
URL: https://svnweb.freebsd.org/changeset/base/361467

Log:
  MFC: Merge ENA v2.0.0 driver
  
  r348416 Update ENA version to v2.0.0
  r348414 Fix ENA manual issues
  r348413 Improve ENA reset handling
  r348412 Fix NULL pointer dereference in ena_up()
  r348411 Unify new line characters in the ENA driver
  r348410 Fix Tx offloads for fragmented pkt headers in ENA
  r348409 Split ENA reset routine into restore and destroy stages
  r348408 Use bitfield for storing global ENA device states
  r348407 Fix error handling when ENA reset fails
  r348406 Fill bdf field of the host_info structure in ENA
  r348405 Add additional doorbells on ENA Tx path
  r348404 Limit maximum size of Rx refill threshold in ENA
  r348403 Add support for the LLQv2 and WC in ENA
  r348402 Lock optimization in ENA
  r348401 Add tuneable drbr ring size and hw queues depth for ENA
  r348400 Fix error in validate_tx_req_id() in ENA
  r348399 Change attach order to prevent crash upon failure in ENA
  r348398 Change order of ifp release on ENA detach
  r348397 Check for number of MSI-x upon partial allocation in ENA
  r348396 Set error value when allocation of IO irq fails in ENA
  r348395 Set vaddr and paddr as NULL when DMA alloc fails in ENA
  r348394 Fix DMA synchronization in the ENA driver Tx and Rx paths
  r348393 Check for missing MSI-x and Tx completions in ENA
  r348392 Fill number of CPUs field on ENA host_info structure
  r348391 Print ENA Tx error conditionally
  r348390 Trigger reset in ENA if there are too many Rx descriptors
  r348389 Remove RSS support in ENA
  r348388 Add notification AENQ handler for ENA
  r348387 Print information when ENA admin error occurs
  r348386 Do not specify active media type in ENA
  r348385 Adjust ENA driver to the new ena-com
  
  Sponsored by:  Amazon, Inc.
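
A note on the LLQ entries above (r348403, r348405): with LLQv2 the driver no
longer hard-codes its descriptor-list layout. The new ena_com_config_llq_info()
in the diff below proposes a default configuration and then falls back to
whatever the device advertises as supported. The stand-alone sketch below only
illustrates that negotiate-with-fallback pattern; the names and structures are
hypothetical, not the driver's own.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical feature bits, loosely mirroring ENA_ADMIN_LIST_ENTRY_SIZE_*. */
#define	ENTRY_SIZE_128B	(1u << 0)
#define	ENTRY_SIZE_192B	(1u << 1)
#define	ENTRY_SIZE_256B	(1u << 2)

/*
 * Use the preferred entry size if the device advertises it, otherwise fall
 * back to the first supported option, in the same spirit as the fallback
 * chain in ena_com_config_llq_info().
 */
static int
negotiate_entry_size(uint32_t supported, uint32_t preferred, uint32_t *bytes)
{
	static const struct {
		uint32_t flag;
		uint32_t size;
	} opts[] = {
		{ ENTRY_SIZE_128B, 128 },
		{ ENTRY_SIZE_192B, 192 },
		{ ENTRY_SIZE_256B, 256 },
	};
	size_t i;

	if (supported & preferred)
		supported = preferred;		/* preferred choice wins */

	for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
		if (supported & opts[i].flag) {
			*bytes = opts[i].size;
			return (0);
		}
	}
	return (-1);				/* device offers nothing usable */
}

int
main(void)
{
	uint32_t bytes;

	/* Device advertises 128B and 256B only; the caller prefers 192B. */
	if (negotiate_entry_size(ENTRY_SIZE_128B | ENTRY_SIZE_256B,
	    ENTRY_SIZE_192B, &bytes) == 0)
		printf("using %u-byte LLQ descriptor list entries\n",
		    (unsigned)bytes);
	return (0);
}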

Modified:
  stable/11/share/man/man4/ena.4
  stable/11/sys/contrib/ena-com/ena_com.c
  stable/11/sys/contrib/ena-com/ena_com.h
  stable/11/sys/contrib/ena-com/ena_defs/ena_admin_defs.h
  stable/11/sys/contrib/ena-com/ena_defs/ena_common_defs.h
  stable/11/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h
  stable/11/sys/contrib/ena-com/ena_defs/ena_gen_info.h
  stable/11/sys/contrib/ena-com/ena_defs/ena_regs_defs.h
  stable/11/sys/contrib/ena-com/ena_eth_com.c
  stable/11/sys/contrib/ena-com/ena_eth_com.h
  stable/11/sys/contrib/ena-com/ena_plat.h
  stable/11/sys/dev/ena/ena.c
  stable/11/sys/dev/ena/ena.h
  stable/11/sys/dev/ena/ena_sysctl.c
  stable/11/sys/dev/ena/ena_sysctl.h
  stable/11/sys/modules/ena/Makefile
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/share/man/man4/ena.4
==============================================================================
--- stable/11/share/man/man4/ena.4	Mon May 25 16:40:48 2020	(r361466)
+++ stable/11/share/man/man4/ena.4	Mon May 25 17:41:20 2020	(r361467)
@@ -27,7 +27,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd May 04, 2017
+.Dd August 16, 2017
 .Dt ENA 4
 .Os
 .Sh NAME
@@ -35,7 +35,7 @@
 .Nd "FreeBSD kernel driver for Elastic Network Adapter (ENA) family"
 .Sh SYNOPSIS
 To compile this driver into the kernel,
-place the following line in your
+place the following line in the
 kernel configuration file:
 .Bd -ragged -offset indent
 .Cd "device ena"
@@ -59,8 +59,9 @@ The driver supports a range of ENA devices, is link-sp
 (i.e., the same driver is used for 10GbE, 25GbE, 40GbE, etc.), and has
 a negotiated and extendable feature set.
 .Pp
-Some ENA devices support SR-IOV. This driver is used for both the
-SR-IOV Physical Function (PF) and Virtual Function (VF) devices.
+Some ENA devices support SR-IOV.
+This driver is used for both the SR-IOV Physical Function (PF) and Virtual
+Function (VF) devices.
 .Pp
 The ENA devices enable high speed and low overhead network traffic
 processing by providing multiple Tx/Rx queue pairs (the maximum number
@@ -82,8 +83,8 @@ to recover in a manner transparent to the application,
 debug logs.
 .Pp
 Some of the ENA devices support a working mode called Low-latency
-Queue (LLQ), which saves several more microseconds. This feature will
-be implemented for driver in future releases.
+Queue (LLQ), which saves several more microseconds.
+This feature will be implemented for driver in future releases.
 .Sh HARDWARE
 Supported PCI vendor ID/device IDs:
 .Pp
@@ -105,19 +106,23 @@ Supported PCI vendor ID/device IDs:
 Error occurred during initialization of the mmio register read request.
 .It ena%d: Can not reset device
 .Pp
-Device could not be reset; device may not be responding or is already
-during reset.
+Device could not be reset.
+.br
+Device may not be responding or is already during reset.
 .It ena%d: device version is too low
 .Pp
-Version of the controller is too low and it is not supported by the driver.
+Version of the controller is too old and it is not supported by the driver.
 .It ena%d: Invalid dma width value %d
 .Pp
-The controller is able to request dma transcation width. Device stopped
-responding or it demanded invalid value.
+The controller is able to request dma transaction width.
+.br
+Device stopped responding or it demanded invalid value.
 .It ena%d: Can not initialize ena admin queue with device
 .Pp
-Initialization of the Admin Queue failed; device may not be responding or there
-was a problem with initialization of the resources.
+Initialization of the Admin Queue failed.
+.br
+Device may not be responding or there was a problem with initialization of
+the resources.
 .It ena%d: Cannot get attribute for ena device rc: %d
 .Pp
 Failed to get attributes of the device from the controller.
@@ -141,11 +146,14 @@ Errors occurred when trying to configure AENQ groups.
 .It ena%d: could not allocate irq vector: %d
 .It ena%d: Unable to allocate bus resource: registers
 .Pp
-Resource allocation failed when initializing the device; driver will not
-be attached.
+Resource allocation failed when initializing the device.
+.br
+Driver will not be attached.
 .It ena%d: ENA device init failed (err: %d)
 .Pp
-Device initialization failed; driver will not be attached.
+Device initialization failed.
+.br
+Driver will not be attached.
 .It ena%d: could not activate irq vector: %d
 .Pp
 Error occurred when trying to activate interrupt vectors for Admin Queue.
@@ -157,13 +165,16 @@ Error occurred when trying to register Admin Queue int
 Error occurred during configuration of the Admin Queue interrupts.
 .It ena%d: Enable MSI-X failed
 .Pp
-Configuration of the MSI-X for Admin Queue failed; there could be lack
-of resources or interrupts could not have been configured; driver will
-not be attached.
+Configuration of the MSI-X for Admin Queue failed.
+.br
+There could be lack of resources or interrupts could not have been configured.
+.br
+Driver will not be attached.
 .It ena%d: VLAN is in use, detach first
 .Pp
-VLANs are being used when trying to detach the driver; VLANs should be detached
-first and then detach routine should be called again.
+VLANs are being used when trying to detach the driver.
+.br
+VLANs must be detached first and then detach routine have to be called again.
 .It ena%d: Unmapped RX DMA tag associations
 .It ena%d: Unmapped TX DMA tag associations
 .Pp
@@ -175,8 +186,9 @@ Error occurred when trying to destroy RX/TX DMA tag.
 .It ena%d: Cannot fill hash control
 .It ena%d: WARNING: RSS was not properly initialized, it will affect bandwidth
 .Pp
-Error occurred during initialization of one of RSS resources; device is still
-going to work but it will affect performance because all RX packets will be
+Error occurred during initialization of one of RSS resources.
+.br
+The device will work with reduced performance because all RX packets will be
 passed to queue 0 and there will be no hash information.
 .It ena%d: failed to tear down irq: %d
 .It ena%d: dev has no parent while releasing res for irq: %d
@@ -196,16 +208,20 @@ Requested MTU value is not supported and will not be s
 Device stopped responding and will be reset.
 .It ena%d: Found a Tx that wasn't completed on time, qid %d, index %d.
 .Pp
-Packet was pushed to the NIC but not sent within given time limit; it may
-be caused by hang of the IO queue.
+Packet was pushed to the NIC but not sent within given time limit.
+.br
+It may be caused by hang of the IO queue.
 .It ena%d: The number of lost tx completion is aboce the threshold (%d > %d). Reset the device
 .Pp
-If too many Tx wasn't completed on time the device is going to be reset; it may
-be caused by hanged queue or device.
+If too many Tx wasn't completed on time the device is going to be reset.
+.br
+It may be caused by hanged queue or device.
 .It ena%d: trigger reset is on
 .Pp
-Device will be reset; reset is triggered either by watchdog or if too many TX
-packets were not completed on time.
+Device will be reset.
+.br
+Reset is triggered either by watchdog or if too many TX packets were not
+completed on time.
 .It ena%d: invalid value recvd
 .Pp
 Link status received from the device in the AENQ handler is invalid.
@@ -220,7 +236,9 @@ Link status received from the device in the AENQ handl
 .It ena%d: could not allocate irq vector: %d
 .It ena%d: failed to register interrupt handler for irq %ju: %d
 .Pp
-IO resources initialization failed. Interface will not be brought up.
+IO resources initialization failed.
+.br
+Interface will not be brought up.
 .It ena%d: LRO[%d] Initialization failed!
 .Pp
 Initialization of the LRO for the RX ring failed.
@@ -228,20 +246,26 @@ Initialization of the LRO for the RX ring failed.
 .It ena%d: failed to add buffer for rx queue %d
 .It ena%d: refilled rx queue %d with %d pages only
 .Pp
-Allocation of resources used on RX path failed; if happened during
-initialization of the IO queue, the interface will not be brought up.
+Allocation of resources used on RX path failed.
+.br
+If happened during initialization of the IO queue, the interface will not be
+brought up.
 .It ena%d: ioctl promisc/allmulti
 .Pp
-IOCTL request for the device to work in promiscuous/allmulti mode; see
+IOCTL request for the device to work in promiscuous/allmulti mode.
+.br
+See
 .Xr ifconfig 8
 for more details.
 .It ena%d: too many fragments. Last fragment: %d!
 .Pp
 Packet with unsupported number of segments was queued for sending to the
-device; packet will be dropped.
+device.
+.br
+Packet will be dropped.
 .Sh SUPPORT
-If an issue is identified with the released source code with a supported adapter
-email the specific information related to the issue to
+If an issue is identified with the released source code with a supported
+adapter, please email the specific information related to the issue to
 .Aq Mt mk at semihalf.com
 and
 .Aq Mt mw at semihalf.com .

Modified: stable/11/sys/contrib/ena-com/ena_com.c
==============================================================================
--- stable/11/sys/contrib/ena-com/ena_com.c	Mon May 25 16:40:48 2020	(r361466)
+++ stable/11/sys/contrib/ena-com/ena_com.c	Mon May 25 17:41:20 2020	(r361467)
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -32,9 +32,6 @@
  */
 
 #include "ena_com.h"
-#ifdef ENA_INTERNAL
-#include "ena_gen_info.h"
-#endif
 
 /*****************************************************************************/
 /*****************************************************************************/
@@ -52,9 +49,6 @@
 #define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
 
 #endif /* ENA_EXTENDED_STATS */
-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
-		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
-		| (ENA_COMMON_SPEC_VERSION_MINOR))
 
 #define ENA_CTRL_MAJOR		0
 #define ENA_CTRL_MINOR		0
@@ -76,6 +70,8 @@
 
 #define ENA_REGS_ADMIN_INTR_MASK 1
 
+#define ENA_POLL_MS	5
+
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
@@ -112,8 +108,8 @@ static inline int ena_com_mem_addr_set(struct ena_com_
 		return ENA_COM_INVAL;
 	}
 
-	ena_addr->mem_addr_low = (u32)addr;
-	ena_addr->mem_addr_high = (u16)((u64)addr >> 32);
+	ena_addr->mem_addr_low = lower_32_bits(addr);
+	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
 
 	return 0;
 }
@@ -127,7 +123,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_
 			       sq->mem_handle);
 
 	if (!sq->entries) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -149,7 +145,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_
 			       cq->mem_handle);
 
 	if (!cq->entries)  {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -174,7 +170,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev 
 			aenq->mem_handle);
 
 	if (!aenq->entries) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -249,7 +245,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd
 	tail_masked = admin_queue->sq.tail & queue_size_mask;
 
 	/* In case of queue FULL */
-	cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
+	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
 		ena_trc_dbg("admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
@@ -301,7 +297,7 @@ static inline int ena_com_init_comp_ctxt(struct ena_co
 
 	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
 	if (unlikely(!queue->comp_ctx)) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -320,7 +316,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(s
 						     struct ena_admin_acq_entry *comp,
 						     size_t comp_size_in_bytes)
 {
-	unsigned long flags;
+	unsigned long flags = 0;
 	struct ena_comp_ctx *comp_ctx;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
@@ -332,7 +328,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(s
 					      cmd_size_in_bytes,
 					      comp,
 					      comp_size_in_bytes);
-	if (unlikely(IS_ERR(comp_ctx)))
+	if (IS_ERR(comp_ctx))
 		admin_queue->running_state = false;
 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
@@ -348,6 +344,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
 
 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 
+	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
 	io_sq->desc_entry_size =
 		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 		sizeof(struct ena_eth_io_tx_desc) :
@@ -373,18 +370,21 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
 		}
 
 		if (!io_sq->desc_addr.virt_addr) {
-			ena_trc_err("memory allocation failed");
+			ena_trc_err("memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 	}
 
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 		/* Allocate bounce buffers */
-		io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
-		io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+		io_sq->bounce_buf_ctrl.buffer_size =
+			ena_dev->llq_info.desc_list_entry_size;
+		io_sq->bounce_buf_ctrl.buffers_num =
+			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 		io_sq->bounce_buf_ctrl.next_to_use = 0;
 
-		size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;
+		size = io_sq->bounce_buf_ctrl.buffer_size *
+			io_sq->bounce_buf_ctrl.buffers_num;
 
 		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
 				   size,
@@ -395,11 +395,12 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
 			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
 
 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
-			ena_trc_err("bounce buffer memory allocation failed");
+			ena_trc_err("bounce buffer memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 
-		memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
+		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+		       sizeof(io_sq->llq_info));
 
 		/* Initiate the first bounce buffer */
 		io_sq->llq_buf_ctrl.curr_bounce_buf =
@@ -408,6 +409,10 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
 		       0x0, io_sq->llq_info.desc_list_entry_size);
 		io_sq->llq_buf_ctrl.descs_left_in_line =
 			io_sq->llq_info.descs_num_before_header;
+
+		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
+			io_sq->entries_in_tx_burst_left =
+				io_sq->llq_info.max_entries_in_tx_burst;
 	}
 
 	io_sq->tail = 0;
@@ -451,7 +456,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_
 	}
 
 	if (!io_cq->cdesc_addr.virt_addr) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -500,12 +505,12 @@ static void ena_com_handle_admin_completion(struct ena
 	cqe = &admin_queue->cq.entries[head_masked];
 
 	/* Go over all the completions */
-	while ((cqe->acq_common_descriptor.flags &
+	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
 			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
 		/* Do not read the rest of the completion entry before the
 		 * phase bit was validated
 		 */
-		rmb();
+		dma_rmb();
 		ena_com_handle_single_admin_completion(admin_queue, cqe);
 
 		head_masked++;
@@ -552,7 +557,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 						     struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags, timeout;
+	unsigned long flags = 0;
+	unsigned long timeout;
 	int ret;
 
 	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
@@ -577,7 +583,7 @@ static int ena_com_wait_and_process_admin_cq_polling(s
 			goto err;
 		}
 
-		ENA_MSLEEP(100);
+		ENA_MSLEEP(ENA_POLL_MS);
 	}
 
 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -598,42 +604,113 @@ err:
 	return ret;
 }
 
+/**
+ * Set the LLQ configurations of the firmware
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which in turn, checks if they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+	struct ena_com_admin_queue *admin_queue;
+	struct ena_admin_set_feat_cmd cmd;
+	struct ena_admin_set_feat_resp resp;
+	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	int ret;
+
+	memset(&cmd, 0x0, sizeof(cmd));
+	admin_queue = &ena_dev->admin_queue;
+
+	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+	ret = ena_com_execute_admin_command(admin_queue,
+					    (struct ena_admin_aq_entry *)&cmd,
+					    sizeof(cmd),
+					    (struct ena_admin_acq_entry *)&resp,
+					    sizeof(resp));
+
+	if (unlikely(ret))
+		ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
+
+	return ret;
+}
+
 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
-				   struct ena_admin_feature_llq_desc *llq_desc)
+				   struct ena_admin_feature_llq_desc *llq_features,
+				   struct ena_llq_configurations *llq_default_cfg)
 {
 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	u16 supported_feat;
+	int rc;
 
 	memset(llq_info, 0, sizeof(*llq_info));
 
-	switch (llq_desc->header_location_ctrl) {
-	case ENA_ADMIN_INLINE_HEADER:
-		llq_info->inline_header = true;
-		break;
-	case ENA_ADMIN_HEADER_RING:
-		llq_info->inline_header = false;
-		break;
-	default:
-		ena_trc_err("Invalid header location control\n");
+	supported_feat = llq_features->header_location_ctrl_supported;
+
+	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+		llq_info->header_location_ctrl =
+			llq_default_cfg->llq_header_location;
+	} else {
+		ena_trc_err("Invalid header location control, supported: 0x%x\n",
+			    supported_feat);
 		return -EINVAL;
 	}
 
-	switch (llq_desc->entry_size_ctrl) {
-	case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
-		llq_info->desc_list_entry_size = 128;
-		break;
-	case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
-		llq_info->desc_list_entry_size = 192;
-		break;
-	case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
-		llq_info->desc_list_entry_size = 256;
-		break;
-	default:
-		ena_trc_err("Invalid entry_size_ctrl %d\n",
-			    llq_desc->entry_size_ctrl);
-		return -EINVAL;
+	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+		supported_feat = llq_features->descriptors_stride_ctrl_supported;
+		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+		} else	{
+			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+			} else {
+				ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+					    supported_feat);
+				return -EINVAL;
+			}
+
+			ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+				    llq_default_cfg->llq_stride_ctrl,
+				    supported_feat,
+				    llq_info->desc_stride_ctrl);
+		}
+	} else {
+		llq_info->desc_stride_ctrl = 0;
 	}
 
-	if ((llq_info->desc_list_entry_size & 0x7)) {
+	supported_feat = llq_features->entry_size_ctrl_supported;
+	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+	} else {
+		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+			llq_info->desc_list_entry_size = 128;
+		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+			llq_info->desc_list_entry_size = 192;
+		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+			llq_info->desc_list_entry_size = 256;
+		} else {
+			ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
+			return -EINVAL;
+		}
+
+		ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+			    llq_default_cfg->llq_ring_entry_size,
+			    supported_feat,
+			    llq_info->desc_list_entry_size);
+	}
+	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
 		/* The desc list entry size should be whole multiply of 8
 		 * This requirement comes from __iowrite64_copy()
 		 */
@@ -642,35 +719,50 @@ static int ena_com_config_llq_info(struct ena_com_dev 
 		return -EINVAL;
 	}
 
-	if (llq_info->inline_header) {
-		llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
-		if ((llq_info->desc_stride_ctrl != ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
-		    (llq_info->desc_stride_ctrl != ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
-			ena_trc_err("Invalid desc_stride_ctrl %d\n",
-				    llq_info->desc_stride_ctrl);
-			return -EINVAL;
-		}
-	} else {
-		llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
-	}
-
-	if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
 		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
 			sizeof(struct ena_eth_io_tx_desc);
 	else
 		llq_info->descs_per_entry = 1;
 
-	llq_info->descs_num_before_header = llq_desc->desc_num_before_header_ctrl;
+	supported_feat = llq_features->desc_num_before_header_supported;
+	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+	} else {
+		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+		} else {
+			ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
+				    supported_feat);
+			return -EINVAL;
+		}
 
-	return 0;
-}
+		ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+			    llq_default_cfg->llq_num_decs_before_header,
+			    supported_feat,
+			    llq_info->descs_num_before_header);
+	}
 
+	llq_info->max_entries_in_tx_burst =
+		(u16)(llq_features->max_tx_burst_size /	llq_default_cfg->llq_ring_entry_size_value);
 
+	rc = ena_com_set_llq(ena_dev);
+	if (rc)
+		ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
 
+	return 0;
+}
+
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 							struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags;
+	unsigned long flags = 0;
 	int ret;
 
 	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
@@ -715,7 +807,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *
 	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
 		mmio_read->read_resp;
 	u32 mmio_read_reg, ret, i;
-	unsigned long flags;
+	unsigned long flags = 0;
 	u32 timeout = mmio_read->reg_read_to;
 
 	ENA_MIGHT_SLEEP();
@@ -736,15 +828,11 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *
 	mmio_read_reg |= mmio_read->seq_num &
 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
-	/* make sure read_resp->req_id get updated before the hw can write
-	 * there
-	 */
-	wmb();
+	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
+			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
-	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
-
 	for (i = 0; i < timeout; i++) {
-		if (read_resp->req_id == mmio_read->seq_num)
+		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
 			break;
 
 		ENA_UDELAY(1);
@@ -761,7 +849,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *
 	}
 
 	if (read_resp->reg_off != offset) {
-		ena_trc_err("Read failure: wrong offset provided");
+		ena_trc_err("Read failure: wrong offset provided\n");
 		ret = ENA_MMIO_READ_TIMEOUT;
 	} else {
 		ret = read_resp->reg_val;
@@ -856,7 +944,6 @@ static void ena_com_io_queue_free(struct ena_com_dev *
 	}
 
 	if (io_sq->bounce_buf_ctrl.base_buffer) {
-		size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 		ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
 		io_sq->bounce_buf_ctrl.base_buffer = NULL;
 	}
@@ -867,6 +954,9 @@ static int wait_for_reset_state(struct ena_com_dev *en
 {
 	u32 val, i;
 
+	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
+	timeout = (timeout * 100) / ENA_POLL_MS;
+
 	for (i = 0; i < timeout; i++) {
 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
@@ -879,8 +969,7 @@ static int wait_for_reset_state(struct ena_com_dev *en
 			exp_state)
 			return 0;
 
-		/* The resolution of the timeout is 100ms */
-		ENA_MSLEEP(100);
+		ENA_MSLEEP(ENA_POLL_MS);
 	}
 
 	return ENA_COM_TIMER_EXPIRED;
@@ -903,7 +992,8 @@ static int ena_com_get_feature_ex(struct ena_com_dev *
 				  struct ena_admin_get_feat_resp *get_resp,
 				  enum ena_admin_aq_feature_id feature_id,
 				  dma_addr_t control_buf_dma_addr,
-				  u32 control_buff_size)
+				  u32 control_buff_size,
+				  u8 feature_ver)
 {
 	struct ena_com_admin_queue *admin_queue;
 	struct ena_admin_get_feat_cmd get_cmd;
@@ -934,7 +1024,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *
 	}
 
 	get_cmd.control_buffer.length = control_buff_size;
-
+	get_cmd.feat_common.feature_version = feature_ver;
 	get_cmd.feat_common.feature_id = feature_id;
 
 	ret = ena_com_execute_admin_command(admin_queue,
@@ -954,13 +1044,15 @@ static int ena_com_get_feature_ex(struct ena_com_dev *
 
 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
 			       struct ena_admin_get_feat_resp *get_resp,
-			       enum ena_admin_aq_feature_id feature_id)
+			       enum ena_admin_aq_feature_id feature_id,
+			       u8 feature_ver)
 {
 	return ena_com_get_feature_ex(ena_dev,
 				      get_resp,
 				      feature_id,
 				      0,
-				      0);
+				      0,
+				      feature_ver);
 }
 
 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
@@ -1030,7 +1122,7 @@ static int ena_com_indirect_table_allocate(struct ena_
 	int ret;
 
 	ret = ena_com_get_feature(ena_dev, &get_resp,
-				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
 	if (unlikely(ret))
 		return ret;
 
@@ -1269,7 +1361,7 @@ int ena_com_execute_admin_command(struct ena_com_admin
 
 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
 					    comp, comp_size);
-	if (unlikely(IS_ERR(comp_ctx))) {
+	if (IS_ERR(comp_ctx)) {
 		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
 			ena_trc_dbg("Failed to submit command [%ld]\n",
 				    PTR_ERR(comp_ctx));
@@ -1389,12 +1481,12 @@ void ena_com_abort_admin_commands(struct ena_com_dev *
 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-	unsigned long flags;
+	unsigned long flags = 0;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
-		ENA_MSLEEP(20);
+		ENA_MSLEEP(ENA_POLL_MS);
 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	}
 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -1433,7 +1525,7 @@ bool ena_com_get_admin_running_state(struct ena_com_de
 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-	unsigned long flags;
+	unsigned long flags = 0;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	ena_dev->admin_queue.running_state = state;
@@ -1460,14 +1552,14 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_de
 	struct ena_admin_get_feat_resp get_resp;
 	int ret;
 
-	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
 	if (ret) {
 		ena_trc_info("Can't get aenq configuration\n");
 		return ret;
 	}
 
 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
-		ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+		ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
 			     get_resp.u.aenq.supported_groups,
 			     groups_flag);
 		return ENA_COM_UNSUPPORTED;
@@ -1542,11 +1634,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_d
 		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
 		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
 
-	if (ver < MIN_ENA_VER) {
-		ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
-		return -1;
-	}
-
 	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
 		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -1579,9 +1666,6 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
 	u16 size;
 
 	ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
-
-	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
-
 	if (admin_queue->comp_ctx)
 		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
 	admin_queue->comp_ctx = NULL;
@@ -1602,6 +1686,7 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
 				      aenq->dma_addr, aenq->mem_handle);
 	aenq->entries = NULL;
+	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
 }
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
@@ -1611,7 +1696,8 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev
 	if (polling)
 		mask_value = ENA_REGS_ADMIN_INTR_MASK;
 
-	ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
+	ENA_REG_WRITE32(ena_dev->bus, mask_value,
+			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
 	ena_dev->admin_queue.polling = polling;
 }
 
@@ -1626,7 +1712,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_
 			       mmio_read->read_resp_dma_addr,
 			       mmio_read->read_resp_mem_handle);
 	if (unlikely(!mmio_read->read_resp))
-		return ENA_COM_NO_MEM;
+		goto err;
 
 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
 
@@ -1635,6 +1721,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_
 	mmio_read->readless_supported = true;
 
 	return 0;
+
+err:
+		ENA_SPINLOCK_DESTROY(mmio_read->lock);
+		return ENA_COM_NO_MEM;
 }
 
 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
@@ -1658,7 +1748,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_
 			      mmio_read->read_resp_mem_handle);
 
 	mmio_read->read_resp = NULL;
-
 	ENA_SPINLOCK_DESTROY(mmio_read->lock);
 }
 
@@ -1675,17 +1764,12 @@ void ena_com_mmio_reg_read_request_write_dev_addr(stru
 }
 
 int ena_com_admin_init(struct ena_com_dev *ena_dev,
-		       struct ena_aenq_handlers *aenq_handlers,
-		       bool init_spinlock)
+		       struct ena_aenq_handlers *aenq_handlers)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
 	int ret;
 
-#ifdef ENA_INTERNAL
-	ena_trc_info("ena_defs : Version:[%s] Build date [%s]",
-		     ENA_GEN_COMMIT, ENA_GEN_DATE);
-#endif
 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
@@ -1707,8 +1791,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
 
 	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
 
-	if (init_spinlock)
-		ENA_SPINLOCK_INIT(admin_queue->q_lock);
+	ENA_SPINLOCK_INIT(admin_queue->q_lock);
 
 	ret = ena_com_init_comp_ctxt(admin_queue);
 	if (ret)
@@ -1848,9 +1931,65 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_
 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
 			    struct ena_admin_get_feat_resp *resp)
 {
-	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
 }
 
+int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
+{
+	struct ena_admin_get_feat_resp resp;
+	struct ena_extra_properties_strings *extra_properties_strings =
+			&ena_dev->extra_properties_strings;
+	u32 rc;
+	extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
+		ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;
+
+	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+			       extra_properties_strings->size,
+			       extra_properties_strings->virt_addr,
+			       extra_properties_strings->dma_addr,
+			       extra_properties_strings->dma_handle);
+	if (unlikely(!extra_properties_strings->virt_addr)) {
+		ena_trc_err("Failed to allocate extra properties strings\n");
+		return 0;
+	}
+
+	rc = ena_com_get_feature_ex(ena_dev, &resp,
+				    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
+				    extra_properties_strings->dma_addr,
+				    extra_properties_strings->size, 0);
+	if (rc) {
+		ena_trc_dbg("Failed to get extra properties strings\n");
+		goto err;
+	}
+
+	return resp.u.extra_properties_strings.count;
+err:
+	ena_com_delete_extra_properties_strings(ena_dev);
+	return 0;
+}
+
+void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
+{
+	struct ena_extra_properties_strings *extra_properties_strings =
+				&ena_dev->extra_properties_strings;
+
+	if (extra_properties_strings->virt_addr) {
+		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+				      extra_properties_strings->size,
+				      extra_properties_strings->virt_addr,
+				      extra_properties_strings->dma_addr,
+				      extra_properties_strings->dma_handle);
+		extra_properties_strings->virt_addr = NULL;
+	}
+}
+
+int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
+				       struct ena_admin_get_feat_resp *resp)
+{
+	return ena_com_get_feature(ena_dev, resp,
+				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
+}
+
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
@@ -1858,7 +1997,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	int rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_DEVICE_ATTRIBUTES);
+				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
 	if (rc)
 		return rc;
 
@@ -1866,17 +2005,34 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	       sizeof(get_resp.u.dev_attr));
 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
 
-	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_MAX_QUEUES_NUM);
-	if (rc)
-		return rc;
+	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		rc = ena_com_get_feature(ena_dev, &get_resp,
+					 ENA_ADMIN_MAX_QUEUES_EXT,
+					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
+		if (rc)
+			return rc;
 
-	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
-	       sizeof(get_resp.u.max_queue));
-	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+			return -EINVAL;
 
+		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
+		       sizeof(get_resp.u.max_queue_ext));
+		ena_dev->tx_max_header_size =
+			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
+	} else {
+		rc = ena_com_get_feature(ena_dev, &get_resp,
+					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
+		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+		       sizeof(get_resp.u.max_queue));
+		ena_dev->tx_max_header_size =
+			get_resp.u.max_queue.max_header_size;
+
+		if (rc)
+			return rc;
+	}
+
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_AENQ_CONFIG);
+				 ENA_ADMIN_AENQ_CONFIG, 0);
 	if (rc)
 		return rc;
 
@@ -1884,7 +2040,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	       sizeof(get_resp.u.aenq));

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
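
Beyond the LLQ plumbing, a recurring theme in the ena_com.c hunks above is
tightening ordering around device-written completion descriptors: plain loads
and bare rmb()/wmb() calls become READ_ONCE8()/READ_ONCE16() paired with
dma_rmb(), so that the rest of a descriptor is read only after its phase bit is
seen to flip. The snippet below restates that phase-bit check with standard C11
atomics instead of the driver's ena_plat.h wrappers; the descriptor layout is
invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Invented completion descriptor; only the flags byte matters here. */
struct cdesc {
	_Atomic uint8_t	flags;		/* bit 0 carries the current phase */
	uint8_t		payload[31];
};

#define	CDESC_PHASE_MASK	0x1

/*
 * Return true once the device has published this descriptor.  The acquire
 * load stands in for READ_ONCE8() followed by dma_rmb(): nothing else in
 * the descriptor may be read until the phase bit is observed to match.
 */
static bool
cdesc_is_ready(struct cdesc *desc, uint8_t expected_phase)
{
	uint8_t flags;

	flags = atomic_load_explicit(&desc->flags, memory_order_acquire);
	return ((flags & CDESC_PHASE_MASK) == expected_phase);
}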