From 3c8121a3177650daeb2475f1f2b4093b211df274 Mon Sep 17 00:00:00 2001 From: Jie Lei Date: Wed, 19 Jun 2024 15:13:53 +0800 Subject: [PATCH] hns3 udma: support non share jfr mode in UM mode driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8V1IQ CVE: NA -------------------------------------------------------- This patch supports non-share jfr mode in UM mode. Signed-off-by: Qi Xu Signed-off-by: Jie Lei --- hw/hns3/hns3_udma_u_buf.c | 2 +- hw/hns3/hns3_udma_u_buf.h | 1 + hw/hns3/hns3_udma_u_common.h | 3 +- hw/hns3/hns3_udma_u_jetty.c | 20 ++++++------ hw/hns3/hns3_udma_u_jfc.c | 5 ++- hw/hns3/hns3_udma_u_jfr.c | 49 ++++++++++++++++++++++-------- hw/hns3/hns3_udma_u_jfr.h | 4 +-- hw/hns3/hns3_udma_u_jfs.c | 9 ++++-- hw/hns3/hns3_udma_u_provider_ops.c | 10 +++--- hw/hns3/hns3_udma_u_provider_ops.h | 6 ++++ hw/hns3/hns3_udma_u_user_ctl.c | 41 ++++++++++++++++++++----- hw/hns3/hns3_udma_u_user_ctl_api.h | 2 ++ 12 files changed, 110 insertions(+), 42 deletions(-) diff --git a/hw/hns3/hns3_udma_u_buf.c b/hw/hns3/hns3_udma_u_buf.c index a473d0b..c76e9c1 100644 --- a/hw/hns3/hns3_udma_u_buf.c +++ b/hw/hns3/hns3_udma_u_buf.c @@ -154,7 +154,7 @@ static int exec_register_dca_mem_cmd(struct udma_u_context *ctx, return urma_cmd_user_ctl(urma_ctx, &in, &out, &udrv_data); } -static void ubn_u_free_dca_mem(struct udma_u_dca_mem *mem) +void ubn_u_free_dca_mem(struct udma_u_dca_mem *mem) { udma_free_buf(&mem->buf); free(mem); diff --git a/hw/hns3/hns3_udma_u_buf.h b/hw/hns3/hns3_udma_u_buf.h index 4d7741e..c31fdd4 100644 --- a/hw/hns3/hns3_udma_u_buf.h +++ b/hw/hns3/hns3_udma_u_buf.h @@ -57,5 +57,6 @@ int udma_u_attach_dca_mem(struct udma_u_context *ctx, uint32_t size, struct udma_dca_buf *buf, bool force); int udma_alloc_buf(struct udma_buf *buf, uint32_t size, int page_size); void udma_free_buf(struct udma_buf *buf); +void ubn_u_free_dca_mem(struct udma_u_dca_mem *mem); #endif /* _UDMA_U_BUF_H */ diff --git a/hw/hns3/hns3_udma_u_common.h 
b/hw/hns3/hns3_udma_u_common.h index 3050ab0..e3a1ec1 100644 --- a/hw/hns3/hns3_udma_u_common.h +++ b/hw/hns3/hns3_udma_u_common.h @@ -16,6 +16,7 @@ #ifndef _UDMA_U_COMMON_H #define _UDMA_U_COMMON_H +#include <unistd.h> #include #include #include @@ -238,7 +239,7 @@ static inline unsigned long align(unsigned long val, unsigned long align) return (val + align - 1) & ~(align - 1); } -#define udma_hw_page_align(x) align(x, UDMA_HW_PAGE_SIZE) +#define udma_hw_page_align(x) align(x, sysconf(_SC_PAGESIZE)) static inline uint32_t to_udma_hem_entries_size(int count, int buf_shift) { diff --git a/hw/hns3/hns3_udma_u_jetty.c b/hw/hns3/hns3_udma_u_jetty.c index e60aa4f..5d1fad4 100644 --- a/hw/hns3/hns3_udma_u_jetty.c +++ b/hw/hns3/hns3_udma_u_jetty.c @@ -42,11 +42,7 @@ static urma_status_t alloc_jfr(struct udma_u_jetty *jetty, urma_context_t *ctx, if (jetty->share_jfr) { jetty->udma_jfr = to_udma_jfr(jetty_cfg->shared.jfr); } else { - if (jetty_cfg->jfr_cfg->trans_mode == URMA_TM_RC && - udma_ctx->dca_ctx.unit_size == 0) - urma_jfr = udma_u_create_jfr_rq(ctx, jetty_cfg->jfr_cfg, jetty); - else - urma_jfr = udma_u_create_jfr(ctx, jetty_cfg->jfr_cfg); + urma_jfr = udma_u_create_jfr_rq(ctx, jetty_cfg->jfr_cfg, jetty); if (!urma_jfr) { URMA_LOG_ERR("failed to create jfr.\n"); return URMA_FAIL; @@ -68,8 +64,13 @@ static urma_status_t alloc_qp_node_table(struct udma_u_jetty *jetty, return ret; if (jetty->tp_mode == URMA_TM_UM) { - jetty->um_qp = udma_alloc_qp(udma_ctx, true, &jetty_cfg->jfs_cfg, - NULL); + if (jetty->udma_jfr == NULL) + jetty->um_qp = udma_alloc_qp(udma_ctx, true, + &jetty_cfg->jfs_cfg, + jetty_cfg->jfr_cfg); + else + jetty->um_qp = udma_alloc_qp(udma_ctx, true, + &jetty_cfg->jfs_cfg, NULL); + if (!jetty->um_qp) { URMA_LOG_ERR("UM qp alloc failed, jetty_id = %u.\n", jetty->urma_jetty.jetty_id.id); @@ -408,8 +409,7 @@ urma_jetty_t *udma_u_create_jetty(urma_context_t *ctx, struct udma_u_jetty *udma_jetty; urma_status_t ret; - if (!jetty_cfg->flag.bs.share_jfr && 
udma_ctx->dca_ctx.unit_size == 0 && - jetty_cfg->jfs_cfg.trans_mode == URMA_TM_RC) + if (!jetty_cfg->flag.bs.share_jfr && udma_ctx->dca_ctx.unit_size == 0) return udma_u_create_jetty_rq(ctx, jetty_cfg); udma_jetty = (struct udma_u_jetty *)calloc(1, sizeof(*udma_jetty)); @@ -842,7 +842,7 @@ urma_status_t udma_u_post_jetty_recv_wr(urma_jetty_t *jetty, if (!udma_jfr->lock_free) (void)pthread_spin_lock(&udma_jfr->lock); - if (udma_jfr->rq_en) { + if (!udma_jfr->share_jfr) { for (nreq = 0; wr; ++nreq, wr = wr->next) { ret = post_recv_one_rq(udma_jfr, wr); if (ret) { diff --git a/hw/hns3/hns3_udma_u_jfc.c b/hw/hns3/hns3_udma_u_jfc.c index 28028cc..e968e36 100644 --- a/hw/hns3/hns3_udma_u_jfc.c +++ b/hw/hns3/hns3_udma_u_jfc.c @@ -320,6 +320,9 @@ static struct udma_u_jfr *get_common_jfr(struct udma_u_context *udma_ctx, if (udma_ctx->jetty_table[table_id].refcnt) { jetty_table = udma_ctx->jetty_table[table_id].table; is_jetty = jetty_table[qpn & mask].is_jetty; + } else { + URMA_LOG_INFO("Failed to poll jfc. 
QP 0x%x has been destroyed.\n", qpn); + return NULL; } if (is_jetty) { jetty = (struct udma_u_jetty *)jetty_table[qpn & mask].jetty; @@ -354,7 +357,7 @@ static int parse_cqe_for_res(struct udma_u_context *udma_ctx, if (cqe->cqe_inline == CQE_INLINE_ENABLE) handle_recv_inl_cqe(cqe, jfr, cr); - if (jfr->rq_en) { + if (!jfr->share_jfr) { cr->user_ctx = jfr->wrid[jfr->idx_que.tail & (jfr->wqe_cnt - 1)]; jfr->idx_que.tail++; } else { diff --git a/hw/hns3/hns3_udma_u_jfr.c b/hw/hns3/hns3_udma_u_jfr.c index 7aec79a..d9fb2d9 100644 --- a/hw/hns3/hns3_udma_u_jfr.c +++ b/hw/hns3/hns3_udma_u_jfr.c @@ -13,6 +13,7 @@ * */ +#include #include #include "hns3_udma_u_common.h" #include "hns3_udma_u_db.h" @@ -103,9 +104,12 @@ static int alloc_jfr_buf(struct udma_u_jfr *jfr, struct udma_u_jetty *jetty) URMA_LOG_ERR("failed to alloc jfr wqe buf.\n"); goto err_alloc_buf; } - } else { + } else if (jetty->tp_mode == URMA_TM_RC) { jfr->wqe_buf.buf = jetty->rc_node->qp->buf.buf + jetty->rc_node->qp->rq.offset; + } else { + jfr->wqe_buf.buf = jetty->um_qp->buf.buf + + jetty->um_qp->rq.offset; } jfr->wrid = (uint64_t *)calloc(jfr->wqe_cnt, sizeof(*jfr->wrid)); @@ -129,7 +133,7 @@ err_alloc_buf: static void free_jfr_buf(struct udma_u_jfr *jfr) { free(jfr->wrid); - if (!jfr->rq_en) + if (jfr->share_jfr) udma_free_buf(&jfr->wqe_buf); udma_free_buf(&jfr->idx_que.idx_buf); udma_bitmap_free(jfr->idx_que.bitmap); @@ -146,14 +150,22 @@ static int exec_jfr_create_cmd(urma_context_t *ctx, struct udma_u_jfr *jfr, cmd.buf_addr = (uintptr_t)jfr->wqe_buf.buf; cmd.idx_addr = (uintptr_t)jfr->idx_que.idx_buf.buf; cmd.db_addr = (uintptr_t)jfr->db; - cmd.share_jfr = !jfr->rq_en; if (jetty != NULL) { - cmd.wqe_buf_addr = (uintptr_t)jetty->rc_node->qp->buf.buf; - cmd.sqe_cnt = jetty->rc_node->qp->sq.wqe_cnt; - cmd.sqe_shift = jetty->rc_node->qp->sq.wqe_shift; - cmd.sge_cnt = jetty->rc_node->qp->ex_sge.sge_cnt; - cmd.sge_shift = jetty->rc_node->qp->ex_sge.sge_shift; - cmd.share_jfr = jetty->share_jfr; + if 
(jetty->tp_mode == URMA_TM_RC) { + cmd.wqe_buf_addr = (uintptr_t)jetty->rc_node->qp->buf.buf; + cmd.sqe_cnt = jetty->rc_node->qp->sq.wqe_cnt; + cmd.sqe_shift = jetty->rc_node->qp->sq.wqe_shift; + cmd.sge_cnt = jetty->rc_node->qp->ex_sge.sge_cnt; + cmd.sge_shift = jetty->rc_node->qp->ex_sge.sge_shift; + cmd.share_jfr = jetty->share_jfr; + } else { + cmd.wqe_buf_addr = (uintptr_t)jetty->um_qp->buf.buf; + cmd.sqe_cnt = jetty->um_qp->sq.wqe_cnt; + cmd.sqe_shift = jetty->um_qp->sq.wqe_shift; + cmd.sge_cnt = jetty->um_qp->ex_sge.sge_cnt; + cmd.sge_shift = jetty->um_qp->ex_sge.sge_shift; + cmd.share_jfr = jetty->share_jfr; + } } udma_set_udata(&udata, &cmd, sizeof(cmd), &resp, sizeof(resp)); @@ -262,7 +274,7 @@ urma_jfr_t *udma_u_create_jfr_rq(urma_context_t *ctx, urma_jfr_cfg_t *cfg, return NULL; jfr->lock_free = cfg->flag.bs.lock_free; - jfr->rq_en = UDMA_JFR_RQ_EN; + jfr->share_jfr = false; if (pthread_spin_init(&jfr->lock, PTHREAD_PROCESS_PRIVATE)) goto err_init_lock_rq; @@ -284,8 +296,17 @@ urma_jfr_t *udma_u_create_jfr_rq(urma_context_t *ctx, urma_jfr_cfg_t *cfg, goto err_insert_jfr_rq; } + if (cfg->trans_mode == URMA_TM_UM) { + if (alloc_um_header_que(ctx, jfr)) { + URMA_LOG_ERR("alloc grh que failed.\n"); + goto err_alloc_um_header; + } + } + return &jfr->urma_jfr; +err_alloc_um_header: + delete_jfr_node(udma_ctx, jfr); err_insert_jfr_rq: urma_cmd_delete_jfr(&jfr->urma_jfr); err_create_jfr_rq: @@ -313,6 +334,7 @@ urma_jfr_t *udma_u_create_jfr(urma_context_t *ctx, urma_jfr_cfg_t *cfg) return NULL; memset(jfr, 0, sizeof(*jfr)); jfr->lock_free = cfg->flag.bs.lock_free; + jfr->share_jfr = true; if (pthread_spin_init(&jfr->lock, PTHREAD_PROCESS_PRIVATE)) goto err_init_lock; @@ -376,7 +398,7 @@ urma_status_t udma_u_delete_jfr(urma_jfr_t *jfr) free_um_header_que(udma_jfr); delete_jfr_node(udma_ctx, udma_jfr); - if (!udma_jfr->rq_en) + if (udma_jfr->share_jfr) delete_jetty_node(udma_ctx, udma_jfr->jfrn); ret = urma_cmd_delete_jfr(jfr); if (ret) { @@ -503,6 +525,9 @@ 
urma_status_t post_recv_one_rq(struct udma_u_jfr *udma_jfr, urma_jfr_wr_t *wr) wqe_idx = udma_jfr->idx_que.head & (udma_jfr->wqe_cnt - 1); wqe = get_jfr_wqe(udma_jfr, wqe_idx); + if (unlikely(udma_jfr->trans_mode == URMA_TM_UM)) + wqe = set_um_header_sge(udma_jfr, wqe_idx, wqe); + fill_recv_sge_to_wqe(wr, wqe, max_sge); udma_jfr->idx_que.head++; @@ -566,7 +591,7 @@ urma_status_t udma_u_post_jfr_wr(urma_jfr_t *jfr, urma_jfr_wr_t *wr, if (!udma_jfr->lock_free) (void)pthread_spin_lock(&udma_jfr->lock); - if (udma_jfr->rq_en) { + if (!udma_jfr->share_jfr) { for (nreq = 0; wr; ++nreq, wr = wr->next) { ret = post_recv_one_rq(udma_jfr, wr); if (ret) { diff --git a/hw/hns3/hns3_udma_u_jfr.h b/hw/hns3/hns3_udma_u_jfr.h index fa75e1e..94ceda2 100644 --- a/hw/hns3/hns3_udma_u_jfr.h +++ b/hw/hns3/hns3_udma_u_jfr.h @@ -21,8 +21,6 @@ #define UDMA_JFR_GRH_HEAD_SZ 40 -#define UDMA_JFR_RQ_EN 1 - struct udma_u_jfr_idx_que { struct udma_buf idx_buf; uint32_t entry_shift; @@ -64,7 +62,7 @@ struct udma_u_jfr { struct udma_buf wqe_buf; uint32_t wqe_cnt; uint32_t wqe_shift; - bool rq_en; + bool share_jfr; }; struct udma_jfr_node { diff --git a/hw/hns3/hns3_udma_u_jfs.c b/hw/hns3/hns3_udma_u_jfs.c index 0194ed2..34eae31 100644 --- a/hw/hns3/hns3_udma_u_jfs.c +++ b/hw/hns3/hns3_udma_u_jfs.c @@ -50,7 +50,7 @@ static urma_status_t alloc_qp_wqe_buf(struct udma_u_context *ctx, struct udma_qp URMA_LOG_ERR("DCA wqe bufs alloc failed!\n"); return URMA_ENOMEM; } - } else if (udma_alloc_buf(&qp->buf, buf_size, UDMA_HW_PAGE_SIZE)) { + } else if (udma_alloc_buf(&qp->buf, buf_size, ctx->page_size)) { URMA_LOG_ERR("qp wqe buf alloc failed!\n"); return URMA_ENOMEM; } @@ -66,6 +66,7 @@ static void init_sq_param(struct udma_qp *qp, urma_jfs_cfg_t *cfg, urma_jfr_cfg_ uint32_t cfg_depth; int wqe_sge_cnt; uint32_t max_gs; + uint32_t rq_cnt; cfg_depth = roundup_pow_of_two(cfg->depth); qp->sq.wqe_cnt = cfg_depth < UDMA_MIN_JFS_DEPTH ? 
@@ -96,8 +97,12 @@ static void init_sq_param(struct udma_qp *qp, urma_jfs_cfg_t *cfg, urma_jfr_cfg_ /* rc rq param */ if (jfr_cfg != NULL) { qp->rq.wqe_cnt = roundup_pow_of_two(jfr_cfg->depth); + if (jfr_cfg->trans_mode == URMA_TM_UM) + rq_cnt = roundup_pow_of_two(jfr_cfg->max_sge + 1); + else + rq_cnt = roundup_pow_of_two(jfr_cfg->max_sge); qp->rq.wqe_shift = udma_ilog32(roundup_pow_of_two(UDMA_HW_SGE_SIZE * - roundup_pow_of_two(jfr_cfg->max_sge))); + rq_cnt)); } } diff --git a/hw/hns3/hns3_udma_u_provider_ops.c b/hw/hns3/hns3_udma_u_provider_ops.c index d6f173d..a302948 100644 --- a/hw/hns3/hns3_udma_u_provider_ops.c +++ b/hw/hns3/hns3_udma_u_provider_ops.c @@ -191,12 +191,16 @@ void udma_cleanup_dca_mem(struct udma_u_context *ctx) { struct udma_u_dca_ctx *dca_ctx = &ctx->dca_ctx; struct udma_dca_dereg_attr dereg_attr = {}; - struct udma_u_dca_mem *mem; + struct udma_u_dca_mem *mem, *tmp; - list_for_each_entry(mem, &dca_ctx->mem_list, entry) { + pthread_spin_lock(&dca_ctx->lock); + list_for_each_entry_safe(mem, tmp, &dca_ctx->mem_list, entry) { dereg_attr.free_key = dca_mem_to_key(mem); exec_deregister_dca_mem_cmd(ctx, &dereg_attr); + list_del(&mem->entry); + ubn_u_free_dca_mem(mem); } + pthread_spin_unlock(&dca_ctx->lock); } static void uninit_dca_context(struct udma_u_context *udma_u_ctx) @@ -207,9 +211,7 @@ static void uninit_dca_context(struct udma_u_context *udma_u_ctx) if (!dca_ctx->unit_size) return; - pthread_spin_lock(&dca_ctx->lock); udma_cleanup_dca_mem(udma_u_ctx); - pthread_spin_unlock(&dca_ctx->lock); if (dca_ctx->buf_status) { ret = munmap(dca_ctx->buf_status, (size_t)udma_u_ctx->page_size); diff --git a/hw/hns3/hns3_udma_u_provider_ops.h b/hw/hns3/hns3_udma_u_provider_ops.h index a4f395b..d81c6fe 100644 --- a/hw/hns3/hns3_udma_u_provider_ops.h +++ b/hw/hns3/hns3_udma_u_provider_ops.h @@ -58,6 +58,12 @@ &(POS)->MEMBER != (HEAD); \ (POS) = list_next_entry(POS, MEMBER)) +#define list_for_each_entry_safe(POS, n, HEAD, MEMBER) \ + for ((POS) = 
list_first_entry(HEAD, typeof(*(POS)), MEMBER), \ + (n) = list_next_entry((POS), MEMBER); \ + &(POS)->MEMBER != (HEAD); \ + (POS) = (n), (n) = list_next_entry(n, MEMBER)) + #define min_t(t, a, b) \ ({ \ t _ta = (a); \ diff --git a/hw/hns3/hns3_udma_u_user_ctl.c b/hw/hns3/hns3_udma_u_user_ctl.c index 650255a..9d966b9 100644 --- a/hw/hns3/hns3_udma_u_user_ctl.c +++ b/hw/hns3/hns3_udma_u_user_ctl.c @@ -259,8 +259,7 @@ static struct udma_qp *find_jfs_qp(struct udma_u_jfs *jfs) return NULL; } -static int update_jfs_ci(urma_jfs_t *jfs, urma_target_jetty_t *tjetty, - uint32_t wqe_cnt) +static int update_jfs_ci(urma_jfs_t *jfs, uint32_t wqe_cnt) { struct udma_qp *qp; @@ -269,12 +268,22 @@ static int update_jfs_ci(urma_jfs_t *jfs, urma_target_jetty_t *tjetty, return EINVAL; } + if (wqe_cnt == 0) { + URMA_LOG_ERR("input wqe num is zero.\n"); + return EINVAL; + } + qp = find_jfs_qp(to_udma_jfs(jfs)); if (!qp) { URMA_LOG_ERR("can't find qp by jfs.\n"); return EINVAL; } + if (qp->sq.head - qp->sq.tail < wqe_cnt) { + URMA_LOG_ERR("input wqe num is wrong, wqe_cnt = %d.\n", wqe_cnt); + return EINVAL; + } + qp->sq.tail += wqe_cnt; return 0; } @@ -306,12 +315,22 @@ static int update_jetty_ci(urma_jetty_t *jetty, uint32_t wqe_cnt) return EINVAL; } + if (wqe_cnt == 0) { + URMA_LOG_ERR("input wqe num is zero.\n"); + return EINVAL; + } + qp = find_jetty_qp(to_udma_jetty(jetty)); if (!qp) { URMA_LOG_ERR("can't find qp by jetty.\n"); return EINVAL; } + if (qp->sq.head - qp->sq.tail < wqe_cnt) { + URMA_LOG_ERR("input wqe num is wrong, wqe_cnt = %d.\n", wqe_cnt); + return EINVAL; + } + qp->sq.tail += wqe_cnt; return 0; } @@ -325,8 +344,7 @@ static int udma_u_update_queue_ci(urma_context_t *ctx, urma_user_ctl_in_t *in, memcpy(&update_in, (void *)in->addr, min(in->len, sizeof(struct hns3_udma_update_queue_ci_in))); if (update_in.type == JFS_TYPE) { - ret = update_jfs_ci(update_in.jfs, update_in.tjetty, - update_in.wqe_cnt); + ret = update_jfs_ci(update_in.jfs, update_in.wqe_cnt); } else if 
(update_in.type == JETTY_TYPE) { ret = update_jetty_ci(update_in.jetty, update_in.wqe_cnt); } else { @@ -491,6 +509,8 @@ static void udma_u_get_jetty_info_set_info_out(struct hns3_u_udma_get_jetty_info info_out->db_addr = udma_ctx->uar + UDMA_DB_CFG0_OFFSET; info_out->dwqe_addr = qp->dwqe_page; info_out->ext_sge_tail_addr = get_send_sge_ex(qp, qp->ex_sge.sge_cnt); + info_out->sge_idx = &qp->next_sge; + info_out->dwqe_enable = !!(qp->flags & HNS3_UDMA_QP_CAP_DIRECT_WQE); } int udma_u_get_jetty_info(urma_context_t *ctx, urma_user_ctl_in_t *in, @@ -500,6 +520,7 @@ int udma_u_get_jetty_info(urma_context_t *ctx, urma_user_ctl_in_t *in, struct hns3_u_udma_get_jetty_info_in *info_in; struct udma_u_jetty *udma_jetty; struct udma_u_context *udma_ctx; + struct udma_u_jfs *udma_jfs; struct udma_qp *qp; if (in->len != sizeof(struct hns3_u_udma_get_jetty_info_in)) { @@ -509,14 +530,18 @@ int udma_u_get_jetty_info(urma_context_t *ctx, urma_user_ctl_in_t *in, info_in = (struct hns3_u_udma_get_jetty_info_in *)in->addr; - if (!info_in->jetty) { - URMA_LOG_ERR("Jetty is null.\n"); + if (info_in->type == JFS_TYPE && info_in->jfs) { + udma_jfs = to_udma_jfs(info_in->jfs); + qp = udma_jfs->um_qp; + } else if (info_in->type == JETTY_TYPE && info_in->jetty) { + udma_jetty = to_udma_jetty(info_in->jetty); + qp = udma_jetty->rc_node->qp; + } else { + URMA_LOG_ERR("Invalid parameter for query jetty/jfs info.\n"); return EINVAL; } - udma_jetty = to_udma_jetty(info_in->jetty); udma_ctx = to_udma_ctx(ctx); - qp = udma_jetty->rc_node->qp; udma_u_get_jetty_info_set_info_out((struct hns3_u_udma_get_jetty_info_out *)out->addr, qp, udma_ctx); diff --git a/hw/hns3/hns3_udma_u_user_ctl_api.h b/hw/hns3/hns3_udma_u_user_ctl_api.h index 0402661..c4904d7 100644 --- a/hw/hns3/hns3_udma_u_user_ctl_api.h +++ b/hw/hns3/hns3_udma_u_user_ctl_api.h @@ -120,6 +120,8 @@ struct hns3_u_udma_get_jetty_info_out { void *ext_sge_tail_addr; uint32_t sl; void *head_idx; + void *sge_idx; + bool dwqe_enable; }; enum 
hns3_udma_u_user_ctl_opcode { -- Gitee