svn commit: r325234 - in vendor-sys/ena-com/dist: . ena_defs
Marcin Wojtas <mw at FreeBSD.org>
Tue Oct 31 12:20:50 UTC 2017
Author: mw
Date: Tue Oct 31 12:20:48 2017
New Revision: 325234
URL: https://svnweb.freebsd.org/changeset/base/325234
Log:
Update ena-com HAL to newest version
The newest ena-com HAL supports LLQv2 and introduces numerous API changes.
Obtained from: Amazon.com, Inc.
Added:
vendor-sys/ena-com/dist/ena_defs/
vendor-sys/ena-com/dist/ena_defs/ena_admin_defs.h (contents, props changed)
vendor-sys/ena-com/dist/ena_defs/ena_common_defs.h (contents, props changed)
vendor-sys/ena-com/dist/ena_defs/ena_eth_io_defs.h (contents, props changed)
vendor-sys/ena-com/dist/ena_defs/ena_gen_info.h (contents, props changed)
vendor-sys/ena-com/dist/ena_defs/ena_includes.h (contents, props changed)
vendor-sys/ena-com/dist/ena_defs/ena_regs_defs.h (contents, props changed)
Modified:
vendor-sys/ena-com/dist/ena_com.c
vendor-sys/ena-com/dist/ena_com.h
vendor-sys/ena-com/dist/ena_eth_com.c
vendor-sys/ena-com/dist/ena_eth_com.h
vendor-sys/ena-com/dist/ena_plat.h
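Among the API changes, the most visible to drivers is that ena_com_dev_reset() now takes a reset-reason argument (see the ena_com.h hunk further down). A minimal caller sketch, assuming ENA_REGS_RESET_OS_TRIGGER is one of the reason codes defined in the new ena_regs_defs.h (truncated from this diff):

#include "ena_com.h"

/* Hypothetical caller of the new ena_com_dev_reset() signature; the
 * reason code lets the device record why the reset was triggered.
 */
static int ena_reset_device(struct ena_com_dev *ena_dev)
{
	/* Previously: ena_com_dev_reset(ena_dev); */
	return ena_com_dev_reset(ena_dev, ENA_REGS_RESET_OS_TRIGGER);
}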
Modified: vendor-sys/ena-com/dist/ena_com.c
==============================================================================
--- vendor-sys/ena-com/dist/ena_com.c Tue Oct 31 12:15:00 2017 (r325233)
+++ vendor-sys/ena-com/dist/ena_com.c Tue Oct 31 12:20:48 2017 (r325234)
@@ -45,6 +45,13 @@
#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
+#ifdef ENA_EXTENDED_STATS
+
+#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#endif /* ENA_EXTENDED_STATS */
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
| (ENA_COMMON_SPEC_VERSION_MINOR))
@@ -65,6 +72,10 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
+
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -102,7 +113,7 @@ static inline int ena_com_mem_addr_set(struct ena_com_
}
ena_addr->mem_addr_low = (u32)addr;
- ena_addr->mem_addr_high = (u64)addr >> 32;
+ ena_addr->mem_addr_high = (u16)((u64)addr >> 32);
return 0;
}
@@ -238,12 +249,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
- admin_queue->sq.tail,
- admin_queue->sq.head,
- admin_queue->q_depth);
+ ena_trc_dbg("admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(ENA_COM_NO_SPACE);
}
@@ -278,6 +286,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd
if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
admin_queue->sq.phase = !admin_queue->sq.phase;
+ ENA_DB_SYNC(&admin_queue->sq.mem_handle);
ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
admin_queue->sq.db_addr);
@@ -362,21 +371,43 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle);
}
- } else {
+
+ if (!io_sq->desc_addr.virt_addr) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+ }
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Allocate bounce buffers */
+ io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
+ io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+ size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;
+
ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
size,
- io_sq->desc_addr.virt_addr,
+ io_sq->bounce_buf_ctrl.base_buffer,
ctx->numa_node,
dev_node);
- if (!io_sq->desc_addr.virt_addr) {
- io_sq->desc_addr.virt_addr =
- ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
+ io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
+
+ if (!io_sq->bounce_buf_ctrl.base_buffer) {
+ ena_trc_err("bounce buffer memory allocation failed");
+ return ENA_COM_NO_MEM;
}
- }
- if (!io_sq->desc_addr.virt_addr) {
- ena_trc_err("memory allocation failed");
- return ENA_COM_NO_MEM;
+ memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
+
+ /* Initiate the first bounce buffer */
+ io_sq->llq_buf_ctrl.curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, io_sq->llq_info.desc_list_entry_size);
+ io_sq->llq_buf_ctrl.descs_left_in_line =
+ io_sq->llq_info.descs_num_before_header;
}
io_sq->tail = 0;
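The bounce-buffer setup above relies on ena_com_get_next_bounce_buffer(), defined later in the ena_com.h diff, which hands out slices of base_buffer round-robin and requires buffers_num to be a power of two so an increment-and-mask wraps without a modulo. A standalone sketch of that indexing, with the buffer sizes as assumptions:

#include <stdint.h>
#include <stdio.h>

#define BUFFERS_NUM 4    /* ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; power of 2 */
#define BUFFER_SIZE 128  /* desc_list_entry_size for 128 B LLQ entries */

int main(void)
{
	static uint8_t base[BUFFERS_NUM * BUFFER_SIZE];
	uint16_t next_to_use = 0;
	int i;

	for (i = 0; i < 6; i++) {
		/* Same pattern as ena_com_get_next_bounce_buffer():
		 * post-increment and mask, so offsets wrap 0,1,2,3,0,1 */
		uint8_t *buf = base +
		    (next_to_use++ & (BUFFERS_NUM - 1)) * BUFFER_SIZE;
		printf("buffer %d -> offset %ld\n", i, (long)(buf - base));
	}
	return 0;
}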
@@ -507,7 +538,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return ENA_COM_NO_MEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE:
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
case ENA_ADMIN_BAD_OPCODE:
case ENA_ADMIN_MALFORMED_REQUEST:
case ENA_ADMIN_ILLEGAL_PARAMETER:
@@ -532,7 +563,7 @@ static int ena_com_wait_and_process_admin_cq_polling(s
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
if (comp_ctx->status != ENA_CMD_SUBMITTED)
- break;
+ break;
if (ENA_TIME_EXPIRE(timeout)) {
ena_trc_err("Wait for completion (polling) timeout\n");
@@ -567,6 +598,75 @@ err:
return ret;
}
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_desc)
+{
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+
+ memset(llq_info, 0, sizeof(*llq_info));
+
+ switch (llq_desc->header_location_ctrl) {
+ case ENA_ADMIN_INLINE_HEADER:
+ llq_info->inline_header = true;
+ break;
+ case ENA_ADMIN_HEADER_RING:
+ llq_info->inline_header = false;
+ break;
+ default:
+ ena_trc_err("Invalid header location control\n");
+ return -EINVAL;
+ }
+
+ switch (llq_desc->entry_size_ctrl) {
+ case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
+ llq_info->desc_list_entry_size = 128;
+ break;
+ case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
+ llq_info->desc_list_entry_size = 192;
+ break;
+ case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
+ llq_info->desc_list_entry_size = 256;
+ break;
+ default:
+ ena_trc_err("Invalid entry_size_ctrl %d\n",
+ llq_desc->entry_size_ctrl);
+ return -EINVAL;
+ }
+
+ if ((llq_info->desc_list_entry_size & 0x7)) {
+ /* The desc list entry size should be a whole multiple of 8
+ * This requirement comes from __iowrite64_copy()
+ */
+ ena_trc_err("illegal entry size %d\n",
+ llq_info->desc_list_entry_size);
+ return -EINVAL;
+ }
+
+ if (llq_info->inline_header) {
+ llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
+ if ((llq_info->desc_stride_ctrl != ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
+ (llq_info->desc_stride_ctrl != ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
+ ena_trc_err("Invalid desc_stride_ctrl %d\n",
+ llq_info->desc_stride_ctrl);
+ return -EINVAL;
+ }
+ } else {
+ llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+ }
+
+ if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+ llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+ sizeof(struct ena_eth_io_tx_desc);
+ else
+ llq_info->descs_per_entry = 1;
+
+ llq_info->descs_num_before_header = llq_desc->desc_num_before_header_ctrl;
+
+ return 0;
+}
+
+
+
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
@@ -614,13 +714,14 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
- u32 mmio_read_reg, timeout, ret;
+ u32 mmio_read_reg, ret, i;
unsigned long flags;
- int i;
+ u32 timeout = mmio_read->reg_read_to;
ENA_MIGHT_SLEEP();
- timeout = mmio_read->reg_read_to ? : ENA_REG_READ_TIMEOUT;
+ if (timeout == 0)
+ timeout = ENA_REG_READ_TIMEOUT;
/* If readless is disabled, perform regular read */
if (!mmio_read->readless_supported)
@@ -745,17 +846,20 @@ static void ena_com_io_queue_free(struct ena_com_dev *
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- size,
- io_sq->desc_addr.virt_addr,
- io_sq->desc_addr.phys_addr,
- io_sq->desc_addr.mem_handle);
- else
- ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
io_sq->desc_addr.virt_addr = NULL;
}
+
+ if (io_sq->bounce_buf_ctrl.base_buffer) {
+ size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+ io_sq->bounce_buf_ctrl.base_buffer = NULL;
+ }
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -807,7 +911,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
ena_trc_dbg("Feature %d isn't supported\n", feature_id);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&get_cmd, 0x0, sizeof(get_cmd));
@@ -1366,7 +1470,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_de
ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
get_resp.u.aenq.supported_groups,
groups_flag);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1480,7 +1584,6 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
if (admin_queue->comp_ctx)
ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
-
admin_queue->comp_ctx = NULL;
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
@@ -1503,6 +1606,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+ ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
ena_dev->admin_queue.polling = polling;
}
@@ -1790,11 +1899,20 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
if (!rc)
memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
sizeof(get_resp.u.hw_hints));
- else if (rc == ENA_COM_PERMISSION)
+ else if (rc == ENA_COM_UNSUPPORTED)
memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
else
return rc;
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+ if (!rc)
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+ sizeof(get_resp.u.llq));
+ else if (rc == ENA_COM_UNSUPPORTED)
+ memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+ else
+ return rc;
+
return 0;
}
@@ -1827,6 +1945,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
ena_aenq_handler handler_cb;
+ unsigned long long timestamp;
u16 masked_head, processed = 0;
u8 phase;
@@ -1838,11 +1957,12 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
/* Go over all the events */
while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
phase) {
- ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%jus]\n",
+ timestamp = (unsigned long long)aenq_common->timestamp_low |
+ ((unsigned long long)aenq_common->timestamp_high << 32);
+ ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group,
aenq_common->syndrom,
- (u64)aenq_common->timestamp_low +
- ((u64)aenq_common->timestamp_high << 32));
+ timestamp);
/* Handle specific event*/
handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -1872,8 +1992,30 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
mb();
ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
+#ifdef ENA_EXTENDED_STATS
+/*
+ * Sets the function Idx and Queue Idx to be used for
+ * the get full statistics feature
+ */
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+ u32 func_queue)
+{
-int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+ /* Function & Queue are acquired from the user in the following format:
+ * bottom half word: funct
+ * top half word: queue
+ */
+ ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
+ ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
+
+ return 0;
+}
+
+#endif /* ENA_EXTENDED_STATS */
+
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason)
{
u32 stat, timeout, cap, reset_val;
int rc;
@@ -1901,6 +2043,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
/* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ ENA_REGS_DEV_CTL_RESET_REASON_MASK;
ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */
@@ -1973,7 +2117,52 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *en
return ret;
}
+#ifdef ENA_EXTENDED_STATS
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+ u32 len)
+{
+ struct ena_com_stats_ctx ctx;
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
+ ena_mem_handle_t mem_handle;
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ int ret;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
+ virt_addr, phys_addr, mem_handle);
+ if (!virt_addr) {
+ ret = ENA_COM_NO_MEM;
+ goto done;
+ }
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd->u.control_buffer.address,
+ phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+ get_cmd->u.control_buffer.length = len;
+
+ get_cmd->device_id = ena_dev->stats_func;
+ get_cmd->queue_idx = ena_dev->stats_queue;
+
+ ret = ena_get_dev_stats(ena_dev, &ctx,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
+ if (ret < 0)
+ goto free_ext_stats_mem;
+
+ ret = snprintf(buff, len, "%s", (char *)virt_addr);
+
+free_ext_stats_mem:
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
+ mem_handle);
+done:
+ return ret;
+}
+#endif
+
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
struct ena_com_admin_queue *admin_queue;
@@ -1983,7 +2172,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, i
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2037,7 +2226,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_
ENA_ADMIN_RSS_HASH_FUNCTION)) {
ena_trc_dbg("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
/* Validate hash function is supported */
@@ -2049,7 +2238,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
ena_trc_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2108,7 +2297,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
ena_trc_err("Flow hash function %d isn't supported\n", func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
switch (func) {
@@ -2201,7 +2390,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ENA_ADMIN_RSS_HASH_INPUT)) {
ena_trc_dbg("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2282,7 +2471,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *
ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
}
@@ -2360,7 +2549,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
ena_trc_dbg("Feature %d isn't supported\n",
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
@@ -2636,7 +2825,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_d
ENA_ADMIN_INTERRUPT_MODERATION);
if (rc) {
- if (rc == ENA_COM_PERMISSION) {
+ if (rc == ENA_COM_UNSUPPORTED) {
ena_trc_dbg("Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
@@ -2758,4 +2947,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_
entry->pkts_per_interval =
intr_moder_tbl[level].pkts_per_interval;
entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+}
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq)
+{
+ int rc;
+ int size;
+
+ if (llq->max_llq_num == 0) {
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ rc = ena_com_config_llq_info(ena_dev, llq);
+ if (rc)
+ return rc;
+
+ /* Validate the descriptor is not too big */
+ size = ena_dev->tx_max_header_size;
+ size += ena_dev->llq_info.descs_num_before_header *
+ sizeof(struct ena_eth_io_tx_desc);
+
+ if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+ ena_trc_err("the size of the LLQ entry is smaller than needed\n");
+ return ENA_COM_INVAL;
+ }
+
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+ return 0;
}
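To make the size check at the end of ena_com_config_dev_mode() above concrete: with the default 128 B list entry, each struct ena_eth_io_tx_desc is 16 B in this HAL (an assumption here, as is the 96 B header bound), so two descriptors before the header leave exactly 96 B for the inlined packet header. A worked version of the same arithmetic:

#include <assert.h>

/* Worked example of the entry-size check in ena_com_config_dev_mode();
 * the sizes are assumptions chosen to fit a 128 B LLQ entry exactly.
 */
#define DESC_LIST_ENTRY_SIZE 128 /* ENA_ADMIN_LIST_ENTRY_SIZE_128B */
#define TX_DESC_SIZE          16 /* sizeof(struct ena_eth_io_tx_desc) */
#define DESCS_BEFORE_HEADER    2 /* descs_num_before_header */
#define TX_MAX_HEADER_SIZE    96

int main(void)
{
	int size = TX_MAX_HEADER_SIZE +
	    DESCS_BEFORE_HEADER * TX_DESC_SIZE; /* 96 + 2*16 = 128 */

	/* LLQ (device) placement is enabled only if the entry fits */
	assert(size <= DESC_LIST_ENTRY_SIZE);
	return 0;
}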
Modified: vendor-sys/ena-com/dist/ena_com.h
==============================================================================
--- vendor-sys/ena-com/dist/ena_com.h Tue Oct 31 12:15:00 2017 (r325233)
+++ vendor-sys/ena-com/dist/ena_com.h Tue Oct 31 12:20:48 2017 (r325234)
@@ -133,6 +133,15 @@ struct ena_com_tx_meta {
u16 l4_hdr_len; /* In words */
};
+struct ena_com_llq_info {
+ bool inline_header;
+ u16 desc_stride_ctrl;
+
+ u16 desc_list_entry_size;
+ u16 descs_num_before_header;
+ u16 descs_per_entry;
+};
+
struct ena_com_io_cq {
struct ena_com_io_desc_addr cdesc_addr;
void *bus;
@@ -171,6 +180,20 @@ struct ena_com_io_cq {
} ____cacheline_aligned;
+struct ena_com_io_bounce_buffer_control {
+ u8 *base_buffer;
+ u16 next_to_use;
+ u16 buffer_size;
+ u16 buffers_num; /* Must be a power of 2 */
+};
+
+/* This struct keeps track of the current location of the next llq entry */
+struct ena_com_llq_pkt_ctrl {
+ u8 *curr_bounce_buf;
+ u16 idx;
+ u16 descs_left_in_line;
+};
+
struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
void *bus;
@@ -183,6 +206,9 @@ struct ena_com_io_sq {
u32 msix_vector;
struct ena_com_tx_meta cached_tx_meta;
+ struct ena_com_llq_info llq_info;
+ struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+ struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
u16 q_depth;
u16 qid;
@@ -190,6 +216,7 @@ struct ena_com_io_sq {
u16 idx;
u16 tail;
u16 next_to_comp;
+ u16 llq_last_copy_tail;
u32 tx_max_header_size;
u8 phase;
u8 desc_entry_size;
@@ -321,6 +348,7 @@ struct ena_com_dev {
void __iomem *mem_bar;
void *dmadev;
void *bus;
+
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
u16 stats_func; /* Selected function for extended statistic dump */
@@ -337,6 +365,8 @@ struct ena_com_dev {
u16 intr_delay_resolution;
u32 intr_moder_tx_interval;
struct ena_intr_moder_entry *intr_moder_tbl;
+
+ struct ena_com_llq_info llq_info;
};
struct ena_com_dev_get_features_ctx {
@@ -345,6 +375,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
struct ena_admin_ena_hw_hints hw_hints;
+ struct ena_admin_feature_llq_desc llq;
};
struct ena_com_create_io_ctx {
@@ -426,10 +457,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
/* ena_com_dev_reset - Perform device FLR to the device.
* @ena_dev: ENA communication layer struct
+ * @reset_reason: Specify the trigger for the reset in case of an error.
*
* @return - 0 on success, negative value on failure.
*/
-int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct
@@ -939,6 +972,15 @@ void ena_com_get_intr_moderation_entry(struct ena_com_
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
+
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq: LLQ feature descriptor, retrieved via ena_com_get_dev_attr_feat.
+ *
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq);
+
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
@@ -1048,6 +1090,30 @@ static inline void ena_com_update_intr_reg(struct ena_
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+ u16 size, buffers_num;
+ u8 *buf;
+
+ size = bounce_buf_ctrl->buffer_size;
+ buffers_num = bounce_buf_ctrl->buffers_num;
+
+ buf = bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
+ prefetch(bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+ return buf;
+}
+
+#ifdef ENA_EXTENDED_STATS
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+ u32 len);
+
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+ u32 funct_queue);
+#endif
#if defined(__cplusplus)
}
#endif /* __cplusplus */
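On the ENA_EXTENDED_STATS prototypes above: the funct_queue argument packs the function index into the bottom 16 bits and the queue index into the top 16, matching the ENA_EXTENDED_STAT_GET_FUNCT/ENA_EXTENDED_STAT_GET_QUEUE macros added to ena_com.c. A small sketch of building and splitting that word; the PACK helper is illustrative, only the GET macros exist in the HAL:

#include <assert.h>
#include <stdint.h>

#define ENA_EXTENDED_STAT_GET_FUNCT(_fq) ((_fq) & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_fq) ((_fq) >> 16)
/* Hypothetical inverse of the GET macros, for illustration only */
#define ENA_EXTENDED_STAT_PACK(funct, queue) \
	(((uint32_t)(queue) << 16) | ((funct) & 0xFFFF))

int main(void)
{
	uint32_t fq = ENA_EXTENDED_STAT_PACK(3, 7);

	assert(ENA_EXTENDED_STAT_GET_FUNCT(fq) == 3);
	assert(ENA_EXTENDED_STAT_GET_QUEUE(fq) == 7);
	return 0;
}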
Added: vendor-sys/ena-com/dist/ena_defs/ena_admin_defs.h
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ vendor-sys/ena-com/dist/ena_defs/ena_admin_defs.h Tue Oct 31 12:20:48 2017 (r325234)
@@ -0,0 +1,1484 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+enum ena_admin_aq_opcode {
+ ENA_ADMIN_CREATE_SQ = 1,
+
+ ENA_ADMIN_DESTROY_SQ = 2,
+
+ ENA_ADMIN_CREATE_CQ = 3,
+
+ ENA_ADMIN_DESTROY_CQ = 4,
+
+ ENA_ADMIN_GET_FEATURE = 8,
+
+ ENA_ADMIN_SET_FEATURE = 9,
+
+ ENA_ADMIN_GET_STATS = 11,
+};
+
+enum ena_admin_aq_completion_status {
+ ENA_ADMIN_SUCCESS = 0,
+
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+
+ ENA_ADMIN_BAD_OPCODE = 2,
+
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
+
+ /* Additional status is provided in ACQ entry extended_status */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+};
+
+enum ena_admin_aq_feature_id {
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+
+ ENA_ADMIN_HW_HINTS = 3,
+
+ ENA_ADMIN_LLQ = 4,
+
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+
+ ENA_ADMIN_MTU = 14,
+
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+
+ ENA_ADMIN_AENQ_CONFIG = 26,
+
+ ENA_ADMIN_LINK_CONFIG = 27,
+
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+};
+
+enum ena_admin_placement_policy_type {
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+ /* descriptors and headers are in device memory (a.k.a Low Latency
+ * Queue)
+ */
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+};
+
+enum ena_admin_link_types {
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
+};
+
+enum ena_admin_completion_policy_type {
+ /* completion queue entry for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+
+ /* completion queue entry upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+
+ /* current queue head pointer is updated in OS memory upon sq
+ * descriptor request
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+
+ /* current queue head pointer is updated in OS memory for each sq
+ * descriptor
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+};
+
+/* basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
+enum ena_admin_get_stats_type {
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+};
+
+enum ena_admin_get_stats_scope {
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+
+ ENA_ADMIN_ETH_TRAFFIC = 1,
+};
+
+struct ena_admin_aq_common_desc {
+ /* 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command_id;
+
+ /* as appears in ena_admin_aq_opcode */
+ uint8_t opcode;
+
+ /* 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+};
+
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+ uint32_t length;
+
+ struct ena_common_mem_addr address;
+};
+
+struct ena_admin_sq {
+ uint16_t sq_idx;
+
+ /* 4:0 : reserved
+ * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved1;
+};
+
+struct ena_admin_aq_entry {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ uint32_t inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ uint32_t inline_data_w4[12];
+};
+
+struct ena_admin_acq_common_desc {
+ /* command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command;
+
+ uint8_t status;
+
+ /* 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ uint16_t extended_status;
+
+ /* serves as a hint as to which AQ entries can be revoked */
+ uint16_t sq_head_indx;
+};
+
+struct ena_admin_acq_entry {
+ struct ena_admin_acq_common_desc acq_common_descriptor;
+
+ uint32_t response_specific_data[14];
+};
+
+struct ena_admin_aq_create_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved0_w1
+ * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved8_w1;
+
+ /* 3:0 : placement_policy - Describing where the SQ
+ * descriptor ring and the SQ packet headers reside:
+ * 0x1 - descriptors and headers are in OS memory,
+ * 0x3 - descriptors and headers in device memory
+ * (a.k.a Low Latency Queue)
+ * 6:4 : completion_policy - Describing what policy
+ * to use for generating a completion entry (cqe) in
+ * the CQ associated with this SQ: 0x0 - cqe for each
+ * sq descriptor, 0x1 - cqe upon request in sq
+ * descriptor, 0x2 - current queue head pointer is
+ * updated in OS memory upon sq descriptor request
+ * 0x3 - current queue head pointer is updated in OS
+ * memory for each sq descriptor
+ * 7 : reserved15_w1
+ */
+ uint8_t sq_caps_2;
+
+ /* 0 : is_physically_contiguous - Describes whether the
+ * queue ring memory is allocated in physically
+ * contiguous pages or split.
+ * 7:1 : reserved17_w1
+ */
+ uint8_t sq_caps_3;
+
+ /* associated completion queue id. This CQ must be created prior to
+ * SQ creation
+ */
+ uint16_t cq_idx;
+
+ /* submission queue depth in entries */
+ uint16_t sq_depth;
+
+ /* SQ physical base address in OS memory. This field should not be
+ * used for Low Latency queues. Has to be page aligned.
+ */
+ struct ena_common_mem_addr sq_ba;
+
+ /* specifies queue head writeback location in OS memory. Valid if
+ * completion_policy is set to completion_policy_head_on_demand or
+ * completion_policy_head. Has to be cache aligned
+ */
+ struct ena_common_mem_addr sq_head_writeback;
+
+ uint32_t reserved0_w7;
+
+ uint32_t reserved0_w8;
+};
+
+enum ena_admin_sq_direction {
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
+};
+
+struct ena_admin_acq_create_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ uint16_t sq_idx;
+
+ uint16_t reserved;
+
+ /* queue doorbell address as an offset to PCIe MMIO REG BAR */
+ uint32_t sq_doorbell_offset;
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
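For readers following the descriptor layouts above: fields such as command_id share a 16-bit word with reserved bits (11:0 command_id, 15:12 reserved), and the phase bit lives in bit 0 of the flags byte. A hedged sketch of that packing; the mask names here are illustrative, not the HAL's own defines:

#include <assert.h>
#include <stdint.h>

/* Illustrative packing for ena_admin_aq_common_desc; the layout
 * follows the comments in the struct above, the names are made up.
 */
#define AQ_COMMAND_ID_MASK  0x0FFF /* 11:0 : command_id */
#define AQ_FLAGS_PHASE_MASK 0x01   /* 0 : phase */

int main(void)
{
	uint16_t command_id;
	uint8_t flags = 0;

	command_id = 42 & AQ_COMMAND_ID_MASK; /* bits 15:12 stay reserved */
	flags = (flags & ~AQ_FLAGS_PHASE_MASK) | (1 & AQ_FLAGS_PHASE_MASK);

	assert(command_id == 42 && (flags & AQ_FLAGS_PHASE_MASK) == 1);
	return 0;
}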