代码拉取完成,页面将自动刷新
同步操作将从 src-openEuler/dpdk 强制同步,此操作会覆盖自 Fork 仓库以来所做的任何修改,且无法恢复!!!
确定后同步将在后台操作,完成时将刷新页面,请耐心等待。
From 359d54d7a384c12e0b4d0514939d082138163905 Mon Sep 17 00:00:00 2001
From: wuchangsheng <wuchangsheng2@huawei.com>
Date: Sat, 25 Dec 2021 15:54:12 +0800
Subject: [PATCH] eal: support multiple secondary-process configs and perfect hugepage mapping
---
config/rte_config.h | 3 +-
lib/eal/common/eal_common_config.c | 43 +++++-
lib/eal/common/eal_common_dynmem.c | 67 +++++++-
lib/eal/common/eal_common_fbarray.c | 106 +++++++++++--
lib/eal/common/eal_common_memory.c | 90 +++++++++--
lib/eal/common/eal_common_options.c | 179 +++++++++++++---------
lib/eal/common/eal_filesystem.h | 58 ++++++-
lib/eal/common/eal_internal_cfg.h | 4 +-
lib/eal/common/eal_memalloc.h | 7 +
lib/eal/common/eal_options.h | 11 +-
lib/eal/common/eal_private.h | 27 +++-
lib/eal/include/rte_eal.h | 10 +-
lib/eal/include/rte_fbarray.h | 6 +
lib/eal/include/rte_memory.h | 20 ++-
lib/eal/linux/eal.c | 230 +++++++++++++++++++++++++---
lib/eal/linux/eal_hugepage_info.c | 2 +-
lib/eal/linux/eal_memalloc.c | 128 +++++++++++++---
lib/eal/linux/eal_memory.c | 104 ++++++++++---
lib/ring/rte_ring.h | 75 +++++++++
19 files changed, 978 insertions(+), 192 deletions(-)
diff --git a/config/rte_config.h b/config/rte_config.h
index cab4390a97..d2f192ee9b 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -34,7 +34,8 @@
#define RTE_MAX_MEM_MB_PER_LIST 32768
#define RTE_MAX_MEMSEG_PER_TYPE 32768
#define RTE_MAX_MEM_MB_PER_TYPE 65536
-#define RTE_MAX_MEMZONE 2560
+#define RTE_MAX_MEMZONE 65535
+#define RTE_MAX_SECONDARY 256
#define RTE_MAX_TAILQ 32
#define RTE_LOG_DP_LEVEL RTE_LOG_INFO
#define RTE_BACKTRACE 1
diff --git a/lib/eal/common/eal_common_config.c b/lib/eal/common/eal_common_config.c
index 1c4c4dd585..fe3db184b7 100644
--- a/lib/eal/common/eal_common_config.c
+++ b/lib/eal/common/eal_common_config.c
@@ -22,18 +22,29 @@ static char runtime_dir[PATH_MAX];
/* internal configuration */
static struct internal_config internal_config;
-const char *
+/****** APIs for libnet ******/
+static char sec_runtime_dir[RTE_MAX_SECONDARY][PATH_MAX];
+static struct rte_config sec_rte_config[RTE_MAX_SECONDARY];
+static struct internal_config sec_internal_config[RTE_MAX_SECONDARY];
+
+char *
rte_eal_get_runtime_dir(void)
{
return runtime_dir;
}
-int
-eal_set_runtime_dir(char *run_dir, size_t size)
+char *
+rte_eal_sec_get_runtime_dir(const int sec_idx)
+{
+ return sec_runtime_dir[sec_idx];
+}
+
+static int
+set_runtime_dir(char *dst_dir, char *src_dir, size_t size)
{
size_t str_size;
- str_size = strlcpy(runtime_dir, run_dir, size);
+ str_size = strlcpy(dst_dir, src_dir, size);
if (str_size >= size) {
RTE_LOG(ERR, EAL, "Runtime directory string too long\n");
return -1;
@@ -42,6 +53,18 @@ eal_set_runtime_dir(char *run_dir, size_t size)
return 0;
}
+int
+eal_sec_set_runtime_dir(char *run_dir, size_t size, const int sec_idx)
+{
+ return set_runtime_dir(sec_runtime_dir[sec_idx], run_dir, size);
+}
+
+int
+eal_set_runtime_dir(char *run_dir, size_t size)
+{
+ return set_runtime_dir(runtime_dir, run_dir, size);
+}
+
/* Return a pointer to the configuration structure */
struct rte_config *
rte_eal_get_configuration(void)
@@ -49,6 +72,18 @@ rte_eal_get_configuration(void)
return &rte_config;
}
+struct rte_config *
+rte_eal_sec_get_configuration(const int sec_idx)
+{
+ return &sec_rte_config[sec_idx];
+}
+
+struct internal_config *
+rte_eal_sec_get_internal_config(const int sec_idx)
+{
+ return &sec_internal_config[sec_idx];
+}
+
/* Return a pointer to the internal configuration structure */
struct internal_config *
eal_get_internal_configuration(void)
diff --git a/lib/eal/common/eal_common_dynmem.c b/lib/eal/common/eal_common_dynmem.c
index 7c5437ddfa..eff78c14d9 100644
--- a/lib/eal/common/eal_common_dynmem.c
+++ b/lib/eal/common/eal_common_dynmem.c
@@ -16,6 +16,50 @@
/** @file Functions common to EALs that support dynamic memory allocation. */
+static int
+eal_sec_set_num_pages(struct internal_config *internal_conf,
+ struct hugepage_info *used_hp)
+{
+ int ret;
+ int hp_sz_idx;
+ uint64_t memory[RTE_MAX_NUMA_NODES];
+
+ if (!internal_conf || !used_hp) {
+ return -1;
+ }
+
+ for (hp_sz_idx = 0;
+ hp_sz_idx < (int) internal_conf->num_hugepage_sizes;
+ hp_sz_idx++) {
+ struct hugepage_info *hpi;
+ hpi = &internal_conf->hugepage_info[hp_sz_idx];
+ used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+ }
+
+ for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
+ memory[hp_sz_idx] = internal_conf->socket_mem[hp_sz_idx];
+
+ ret = eal_dynmem_calc_num_pages_per_socket(memory,
+ internal_conf->hugepage_info, used_hp,
+ internal_conf->num_hugepage_sizes);
+
+ return ret;
+}
+
+static int
+eal_sec_get_num_pages(const struct hugepage_info *used_hp,
+ uint64_t hugepage_sz, int socket)
+{
+ int hp_sz_idx;
+
+ for (hp_sz_idx = 0; hp_sz_idx < MAX_HUGEPAGE_SIZES; hp_sz_idx++) {
+ if (used_hp[hp_sz_idx].hugepage_sz == hugepage_sz)
+ return used_hp[hp_sz_idx].num_pages[socket];
+ }
+
+ return 0;
+}
+
int
eal_dynmem_memseg_lists_init(void)
{
@@ -29,6 +73,7 @@ eal_dynmem_memseg_lists_init(void)
uint64_t max_mem, max_mem_per_type;
unsigned int max_seglists_per_type;
unsigned int n_memtypes, cur_type;
+ struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
struct internal_config *internal_conf =
eal_get_internal_configuration();
@@ -36,6 +81,14 @@ eal_dynmem_memseg_lists_init(void)
if (internal_conf->no_hugetlbfs)
return 0;
+ if (internal_conf->map_perfect) {
+ memset(used_hp, 0, sizeof(used_hp));
+ ret = eal_sec_set_num_pages(internal_conf, used_hp);
+ if (ret == -1) {
+ RTE_LOG(ERR, EAL, "Cannot get num pages\n");
+ }
+ }
+
/*
* figuring out amount of memory we're going to have is a long and very
* involved process. the basic element we're operating with is a memory
@@ -132,6 +185,7 @@ eal_dynmem_memseg_lists_init(void)
struct memtype *type = &memtypes[cur_type];
uint64_t max_mem_per_list, pagesz;
int socket_id;
+ unsigned int need_n_segs, cur_n_segs;
pagesz = type->page_sz;
socket_id = type->socket_id;
@@ -175,8 +229,17 @@ eal_dynmem_memseg_lists_init(void)
"n_segs:%i socket_id:%i hugepage_sz:%" PRIu64 "\n",
n_seglists, n_segs, socket_id, pagesz);
+ if (internal_conf->map_perfect)
+ need_n_segs = eal_sec_get_num_pages(used_hp, pagesz, socket_id);
+ else
+ need_n_segs = n_segs;
+
/* create all segment lists */
- for (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) {
+ for (cur_seglist = 0; cur_seglist < n_seglists && need_n_segs > 0; cur_seglist++) {
+ cur_n_segs = RTE_MIN(need_n_segs, n_segs);
+ if (internal_conf->map_perfect)
+ need_n_segs -= cur_n_segs;
+
if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
RTE_LOG(ERR, EAL,
"No more space in memseg lists, please increase %s\n",
@@ -185,7 +248,7 @@ eal_dynmem_memseg_lists_init(void)
}
msl = &mcfg->memsegs[msl_idx++];
- if (eal_memseg_list_init(msl, pagesz, n_segs,
+ if (eal_memseg_list_init(msl, pagesz, cur_n_segs,
socket_id, cur_seglist, true))
goto out;
diff --git a/lib/eal/common/eal_common_fbarray.c b/lib/eal/common/eal_common_fbarray.c
index 3a28a53247..9c125c104c 100644
--- a/lib/eal/common/eal_common_fbarray.c
+++ b/lib/eal/common/eal_common_fbarray.c
@@ -9,6 +9,8 @@
#include <errno.h>
#include <string.h>
#include <unistd.h>
+#include <sys/file.h>
+#include <sys/mman.h>
#include <rte_common.h>
#include <rte_eal_paging.h>
@@ -830,8 +832,9 @@ rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
return -1;
}
-int
-rte_fbarray_attach(struct rte_fbarray *arr)
+static int
+__rte_fbarray_attach(struct rte_fbarray *arr, const char *runtime_dir,
+ const struct internal_config *internal_conf)
{
struct mem_area *ma = NULL, *tmp = NULL;
size_t page_sz, mmap_len;
@@ -867,13 +870,15 @@ rte_fbarray_attach(struct rte_fbarray *arr)
mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
- /* check the tailq - maybe user has already mapped this address space */
- rte_spinlock_lock(&mem_area_lock);
+ if (!internal_conf->pri_and_sec) {
+ /* check the tailq - maybe user has already mapped this address space */
+ rte_spinlock_lock(&mem_area_lock);
- TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
- if (overlap(tmp, arr->data, mmap_len)) {
- rte_errno = EEXIST;
- goto fail;
+ TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
+ if (overlap(tmp, arr->data, mmap_len)) {
+ rte_errno = EEXIST;
+ goto fail;
+ }
}
}
@@ -883,7 +888,7 @@ rte_fbarray_attach(struct rte_fbarray *arr)
if (data == NULL)
goto fail;
- eal_get_fbarray_path(path, sizeof(path), arr->name);
+ eal_sec_get_fbarray_path(path, sizeof(path), arr->name, runtime_dir);
fd = eal_file_open(path, EAL_OPEN_READWRITE);
if (fd < 0) {
@@ -897,16 +902,27 @@ rte_fbarray_attach(struct rte_fbarray *arr)
if (resize_and_map(fd, path, data, mmap_len))
goto fail;
+ if (internal_conf->pri_and_sec) {
+ if (flock(fd, LOCK_UN)) {
+ rte_errno = errno;
+ goto fail;
+ }
+ close(fd);
+ fd = -1;
+ }
+
/* store our new memory area */
ma->addr = data;
ma->fd = fd; /* keep fd until detach/destroy */
ma->len = mmap_len;
- TAILQ_INSERT_TAIL(&mem_area_tailq, ma, next);
+ if (!internal_conf->pri_and_sec) {
+ TAILQ_INSERT_TAIL(&mem_area_tailq, ma, next);
- /* we're done */
+ /* we're done */
- rte_spinlock_unlock(&mem_area_lock);
+ rte_spinlock_unlock(&mem_area_lock);
+ }
return 0;
fail:
if (data)
@@ -918,6 +934,31 @@ rte_fbarray_attach(struct rte_fbarray *arr)
return -1;
}
+int
+rte_fbarray_attach(struct rte_fbarray *arr)
+{
+ const struct internal_config *internal_conf = eal_get_internal_configuration();
+ return __rte_fbarray_attach(arr, rte_eal_get_runtime_dir(), internal_conf);
+}
+
+int
+rte_sec_fbarray_attach(struct rte_fbarray *arr,
+ const int switch_pri_and_sec, const int sec_idx)
+{
+ struct internal_config *internal_conf = NULL;
+ char *runtime_dir = NULL;
+
+ if (!switch_pri_and_sec) {
+ runtime_dir = rte_eal_get_runtime_dir();
+ internal_conf = eal_get_internal_configuration();
+ } else {
+ runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+ internal_conf = rte_eal_sec_get_internal_config(sec_idx);
+ }
+
+ return __rte_fbarray_attach(arr, runtime_dir, internal_conf);
+}
+
int
rte_fbarray_detach(struct rte_fbarray *arr)
{
@@ -1057,6 +1098,47 @@ rte_fbarray_destroy(struct rte_fbarray *arr)
return ret;
}
+int
+rte_sec_fbarray_destroy(struct rte_fbarray *arr,
+ const int sec_idx)
+{
+ int fd, ret;
+ char path[PATH_MAX];
+
+ if (arr == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ size_t page_sz = rte_mem_page_size();
+ if (page_sz == (size_t)-1)
+ return -1;
+
+ size_t mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
+ rte_mem_unmap(arr->data, mmap_len);
+
+ /* try deleting the file */
+ eal_sec_get_fbarray_path(path, sizeof(path), arr->name, rte_eal_sec_get_runtime_dir(sec_idx));
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open fbarray file: %s\n", strerror(errno));
+ return -1;
+ }
+ if (flock(fd, LOCK_EX | LOCK_NB)) {
+ RTE_LOG(DEBUG, EAL, "Cannot destroy fbarray - another process is using it\n");
+ rte_errno = EBUSY;
+ ret = -1;
+ } else {
+ ret = 0;
+ unlink(path);
+ memset(arr, 0, sizeof(*arr));
+ }
+ close(fd);
+
+ return ret;
+}
+
void *
rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
{
diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index 616db5ce31..884996faf2 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -307,9 +307,9 @@ virt2memseg(const void *addr, const struct rte_memseg_list *msl)
}
static struct rte_memseg_list *
-virt2memseg_list(const void *addr)
+virt2memseg_list(const void *addr, const struct rte_config *rte_cfg)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
struct rte_memseg_list *msl;
int msl_idx;
@@ -331,7 +331,13 @@ virt2memseg_list(const void *addr)
struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
- return virt2memseg_list(addr);
+ return virt2memseg_list(addr, rte_eal_get_configuration());
+}
+
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg)
+{
+ return virt2memseg_list(addr, rte_cfg);
}
struct virtiova {
@@ -386,11 +392,25 @@ rte_mem_iova2virt(rte_iova_t iova)
return vi.virt;
}
+static struct rte_memseg *
+__rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg)
+{
+ return virt2memseg(addr, msl != NULL ? msl :
+ rte_sec_mem_virt2memseg_list(addr, rte_cfg));
+}
+
struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
- return virt2memseg(addr, msl != NULL ? msl :
- rte_mem_virt2memseg_list(addr));
+ return __rte_mem_virt2memseg(addr, msl, rte_eal_get_configuration());
+}
+
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg)
+{
+ return __rte_mem_virt2memseg(addr, msl, rte_cfg);
}
static int
@@ -1069,12 +1089,14 @@ rte_eal_memory_detach(void)
}
/* init memory subsystem */
-int
-rte_eal_memory_init(void)
+static int
+__rte_eal_memory_init(__attribute__((__unused__)) const char *runtime_dir,
+ const struct internal_config *internal_conf,
+ struct rte_config *rte_cfg,
+ const int switch_pri_and_sec,
+ const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
int retval;
RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");
@@ -1083,17 +1105,18 @@ rte_eal_memory_init(void)
return -1;
/* lock mem hotplug here, to prevent races while we init */
- rte_mcfg_mem_read_lock();
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
- if (rte_eal_memseg_init() < 0)
+ if (rte_eal_memseg_init(switch_pri_and_sec, sec_idx) < 0)
goto fail;
- if (eal_memalloc_init() < 0)
- goto fail;
+ if (!internal_conf->pri_and_sec)
+ if (eal_memalloc_init() < 0)
+ goto fail;
- retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ retval = rte_cfg->process_type == RTE_PROC_PRIMARY ?
rte_eal_hugepage_init() :
- rte_eal_hugepage_attach();
+ rte_eal_hugepage_attach(switch_pri_and_sec, sec_idx);
if (retval < 0)
goto fail;
@@ -1102,10 +1125,43 @@ rte_eal_memory_init(void)
return 0;
fail:
- rte_mcfg_mem_read_unlock();
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
return -1;
}
+int
+rte_eal_memory_init(void)
+{
+ const int unused_idx = -1;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ return __rte_eal_memory_init(rte_eal_get_runtime_dir(),
+ internal_conf, rte_eal_get_configuration(),
+ false, unused_idx);
+}
+
+int
+rte_eal_sec_memory_init(const int sec_idx)
+{
+ int ret;
+ struct rte_config *rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+
+ ret = __rte_eal_memory_init(rte_eal_sec_get_runtime_dir(sec_idx),
+ rte_eal_sec_get_internal_config(sec_idx), rte_cfg,
+ true, sec_idx);
+
+ rte_rwlock_read_unlock(&rte_cfg->mem_config->memory_hotplug_lock);
+
+ return ret;
+}
+
+int
+rte_eal_sec_memory_cleanup(const int sec_idx)
+{
+ return eal_sec_memalloc_destroy(sec_idx);
+}
+
#ifndef RTE_EXEC_ENV_WINDOWS
#define EAL_MEMZONE_LIST_REQ "/eal/memzone_list"
#define EAL_MEMZONE_INFO_REQ "/eal/memzone_info"
diff --git a/lib/eal/common/eal_common_options.c b/lib/eal/common/eal_common_options.c
index 1cfdd75f3b..ba3b19ee6e 100644
--- a/lib/eal/common/eal_common_options.c
+++ b/lib/eal/common/eal_common_options.c
@@ -105,6 +105,7 @@ eal_long_options[] = {
{OPT_TELEMETRY, 0, NULL, OPT_TELEMETRY_NUM },
{OPT_NO_TELEMETRY, 0, NULL, OPT_NO_TELEMETRY_NUM },
{OPT_FORCE_MAX_SIMD_BITWIDTH, 1, NULL, OPT_FORCE_MAX_SIMD_BITWIDTH_NUM},
+ {OPT_MAP_PERFECT, 0, NULL, OPT_MAP_PERFECT_NUM },
{0, 0, NULL, 0 }
};
@@ -301,54 +302,66 @@ eal_get_hugefile_prefix(void)
return HUGEFILE_PREFIX_DEFAULT;
}
+const char *
+eal_sec_get_hugefile_prefix(const int sec_idx)
+{
+ struct internal_config *internal_conf =
+ rte_eal_sec_get_internal_config(sec_idx);
+
+ if (internal_conf->hugefile_prefix != NULL)
+ return internal_conf->hugefile_prefix;
+ return HUGEFILE_PREFIX_DEFAULT;
+}
+
void
-eal_reset_internal_config(struct internal_config *internal_cfg)
+eal_reset_internal_config(struct internal_config *internal_conf)
{
int i;
- internal_cfg->memory = 0;
- internal_cfg->force_nrank = 0;
- internal_cfg->force_nchannel = 0;
- internal_cfg->hugefile_prefix = NULL;
- internal_cfg->hugepage_dir = NULL;
- internal_cfg->force_sockets = 0;
+ internal_conf->memory = 0;
+ internal_conf->force_nrank = 0;
+ internal_conf->force_nchannel = 0;
+ internal_conf->hugefile_prefix = NULL;
+ internal_conf->hugepage_dir = NULL;
+ internal_conf->force_sockets = 0;
/* zero out the NUMA config */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- internal_cfg->socket_mem[i] = 0;
- internal_cfg->force_socket_limits = 0;
+ internal_conf->socket_mem[i] = 0;
+ internal_conf->force_socket_limits = 0;
/* zero out the NUMA limits config */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- internal_cfg->socket_limit[i] = 0;
+ internal_conf->socket_limit[i] = 0;
/* zero out hugedir descriptors */
for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) {
- memset(&internal_cfg->hugepage_info[i], 0,
- sizeof(internal_cfg->hugepage_info[0]));
- internal_cfg->hugepage_info[i].lock_descriptor = -1;
+ memset(&internal_conf->hugepage_info[i], 0,
+ sizeof(internal_conf->hugepage_info[0]));
+ internal_conf->hugepage_info[i].lock_descriptor = -1;
}
- internal_cfg->base_virtaddr = 0;
+ internal_conf->base_virtaddr = 0;
#ifdef LOG_DAEMON
- internal_cfg->syslog_facility = LOG_DAEMON;
+ internal_conf->syslog_facility = LOG_DAEMON;
#endif
/* if set to NONE, interrupt mode is determined automatically */
- internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE;
- memset(internal_cfg->vfio_vf_token, 0,
- sizeof(internal_cfg->vfio_vf_token));
+ internal_conf->vfio_intr_mode = RTE_INTR_MODE_NONE;
+ memset(internal_conf->vfio_vf_token, 0,
+ sizeof(internal_conf->vfio_vf_token));
#ifdef RTE_LIBEAL_USE_HPET
- internal_cfg->no_hpet = 0;
+ internal_conf->no_hpet = 0;
#else
- internal_cfg->no_hpet = 1;
+ internal_conf->no_hpet = 1;
#endif
- internal_cfg->vmware_tsc_map = 0;
- internal_cfg->create_uio_dev = 0;
- internal_cfg->iova_mode = RTE_IOVA_DC;
- internal_cfg->user_mbuf_pool_ops_name = NULL;
- CPU_ZERO(&internal_cfg->ctrl_cpuset);
- internal_cfg->init_complete = 0;
- internal_cfg->max_simd_bitwidth.bitwidth = RTE_VECT_DEFAULT_SIMD_BITWIDTH;
- internal_cfg->max_simd_bitwidth.forced = 0;
+ internal_conf->vmware_tsc_map = 0;
+ internal_conf->create_uio_dev = 0;
+ internal_conf->iova_mode = RTE_IOVA_DC;
+ internal_conf->user_mbuf_pool_ops_name = NULL;
+ CPU_ZERO(&internal_conf->ctrl_cpuset);
+ internal_conf->init_complete = 0;
+ internal_conf->map_perfect = 0;
+ internal_conf->max_simd_bitwidth.bitwidth = RTE_VECT_DEFAULT_SIMD_BITWIDTH;
+ internal_conf->max_simd_bitwidth.forced = 0;
}
static int
@@ -1496,12 +1509,10 @@ eal_parse_simd_bitwidth(const char *arg)
}
static int
-eal_parse_base_virtaddr(const char *arg)
+eal_parse_base_virtaddr(const char *arg, struct internal_config *conf)
{
char *end;
uint64_t addr;
- struct internal_config *internal_conf =
- eal_get_internal_configuration();
errno = 0;
addr = strtoull(arg, &end, 16);
@@ -1521,7 +1532,7 @@ eal_parse_base_virtaddr(const char *arg)
* it can align to 2MB for x86. So this alignment can also be used
* on x86 and other architectures.
*/
- internal_conf->base_virtaddr =
+ conf->base_virtaddr =
RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);
return 0;
@@ -1877,7 +1888,7 @@ eal_parse_common_option(int opt, const char *optarg,
}
break;
case OPT_BASE_VIRTADDR_NUM:
- if (eal_parse_base_virtaddr(optarg) < 0) {
+ if (eal_parse_base_virtaddr(optarg, conf) < 0) {
RTE_LOG(ERR, EAL, "invalid parameter for --"
OPT_BASE_VIRTADDR "\n");
return -1;
@@ -1933,9 +1944,9 @@ eal_auto_detect_cores(struct rte_config *cfg)
}
static void
-compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
+compute_ctrl_threads_cpuset(struct internal_config *internal_conf)
{
- rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset;
+ rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
rte_cpuset_t default_set;
unsigned int lcore_id;
@@ -1960,25 +1971,23 @@ compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
}
int
-eal_cleanup_config(struct internal_config *internal_cfg)
+eal_cleanup_config(struct internal_config *internal_conf)
{
- if (internal_cfg->hugefile_prefix != NULL)
- free(internal_cfg->hugefile_prefix);
- if (internal_cfg->hugepage_dir != NULL)
- free(internal_cfg->hugepage_dir);
- if (internal_cfg->user_mbuf_pool_ops_name != NULL)
- free(internal_cfg->user_mbuf_pool_ops_name);
+ if (internal_conf->hugefile_prefix != NULL)
+ free(internal_conf->hugefile_prefix);
+ if (internal_conf->hugepage_dir != NULL)
+ free(internal_conf->hugepage_dir);
+ if (internal_conf->user_mbuf_pool_ops_name != NULL)
+ free(internal_conf->user_mbuf_pool_ops_name);
return 0;
}
int
-eal_adjust_config(struct internal_config *internal_cfg)
+eal_adjust_config(struct internal_config *internal_conf)
{
int i;
struct rte_config *cfg = rte_eal_get_configuration();
- struct internal_config *internal_conf =
- eal_get_internal_configuration();
if (!core_parsed)
eal_auto_detect_cores(cfg);
@@ -1994,44 +2003,64 @@ eal_adjust_config(struct internal_config *internal_cfg)
lcore_config[cfg->main_lcore].core_role = ROLE_RTE;
}
- compute_ctrl_threads_cpuset(internal_cfg);
+ compute_ctrl_threads_cpuset(internal_conf);
/* if no memory amounts were requested, this will result in 0 and
* will be overridden later, right after eal_hugepage_info_init() */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- internal_cfg->memory += internal_cfg->socket_mem[i];
+ internal_conf->memory += internal_conf->socket_mem[i];
return 0;
}
int
-eal_check_common_options(struct internal_config *internal_cfg)
+eal_sec_adjust_config(struct internal_config *internal_conf)
{
- struct rte_config *cfg = rte_eal_get_configuration();
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ struct internal_config *internal_conf_head;
+ internal_conf->process_type = RTE_PROC_SECONDARY;
+
+ internal_conf_head = rte_eal_sec_get_internal_config(0);
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ if (!internal_conf_head[i].pri_and_sec)
+ continue;
+ if (internal_conf == &internal_conf_head[i])
+ continue;
+ if (!strcmp(internal_conf_head[i].hugefile_prefix, internal_conf->hugefile_prefix))
+ return -EALREADY;
+ }
+
+ for (int i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ internal_conf->memory += internal_conf->socket_mem[i];
- if (cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) {
+ return 0;
+}
+
+int
+eal_check_common_options(struct internal_config *internal_conf,
+ struct rte_config *cfg)
+{
+ if (!internal_conf->pri_and_sec &&
+ cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) {
RTE_LOG(ERR, EAL, "Main lcore is not enabled for DPDK\n");
return -1;
}
- if (internal_cfg->process_type == RTE_PROC_INVALID) {
+ if (internal_conf->process_type == RTE_PROC_INVALID) {
RTE_LOG(ERR, EAL, "Invalid process type specified\n");
return -1;
}
- if (internal_cfg->hugefile_prefix != NULL &&
- strlen(internal_cfg->hugefile_prefix) < 1) {
+ if (internal_conf->hugefile_prefix != NULL &&
+ strlen(internal_conf->hugefile_prefix) < 1) {
RTE_LOG(ERR, EAL, "Invalid length of --" OPT_FILE_PREFIX " option\n");
return -1;
}
- if (internal_cfg->hugepage_dir != NULL &&
- strlen(internal_cfg->hugepage_dir) < 1) {
+ if (internal_conf->hugepage_dir != NULL &&
+ strlen(internal_conf->hugepage_dir) < 1) {
RTE_LOG(ERR, EAL, "Invalid length of --" OPT_HUGE_DIR" option\n");
return -1;
}
- if (internal_cfg->user_mbuf_pool_ops_name != NULL &&
- strlen(internal_cfg->user_mbuf_pool_ops_name) < 1) {
+ if (internal_conf->user_mbuf_pool_ops_name != NULL &&
+ strlen(internal_conf->user_mbuf_pool_ops_name) < 1) {
RTE_LOG(ERR, EAL, "Invalid length of --" OPT_MBUF_POOL_OPS_NAME" option\n");
return -1;
}
@@ -2040,18 +2069,18 @@ eal_check_common_options(struct internal_config *internal_cfg)
"option\n");
return -1;
}
- if (mem_parsed && internal_cfg->force_sockets == 1) {
+ if (mem_parsed && internal_conf->force_sockets == 1) {
RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot "
"be specified at the same time\n");
return -1;
}
- if (internal_cfg->no_hugetlbfs && internal_cfg->force_sockets == 1) {
+ if (internal_conf->no_hugetlbfs && internal_conf->force_sockets == 1) {
RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_MEM" cannot "
"be specified together with --"OPT_NO_HUGE"\n");
return -1;
}
- if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink &&
- !internal_cfg->in_memory) {
+ if (internal_conf->no_hugetlbfs && internal_conf->hugepage_unlink &&
+ !internal_conf->in_memory) {
RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot "
"be specified together with --"OPT_NO_HUGE"\n");
return -1;
@@ -2060,35 +2089,43 @@ eal_check_common_options(struct internal_config *internal_cfg)
RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT
" is only supported in non-legacy memory mode\n");
}
- if (internal_cfg->single_file_segments &&
- internal_cfg->hugepage_unlink &&
- !internal_cfg->in_memory) {
+ if (internal_conf->single_file_segments &&
+ internal_conf->hugepage_unlink &&
+ !internal_conf->in_memory) {
RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE_SEGMENTS" is "
"not compatible with --"OPT_HUGE_UNLINK"\n");
return -1;
}
- if (internal_cfg->legacy_mem &&
- internal_cfg->in_memory) {
+ if (internal_conf->legacy_mem &&
+ internal_conf->in_memory) {
RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible "
"with --"OPT_IN_MEMORY"\n");
return -1;
}
- if (internal_cfg->legacy_mem && internal_cfg->match_allocations) {
+ if (internal_conf->legacy_mem && internal_conf->match_allocations) {
RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible "
"with --"OPT_MATCH_ALLOCATIONS"\n");
return -1;
}
- if (internal_cfg->no_hugetlbfs && internal_cfg->match_allocations) {
+ if (internal_conf->no_hugetlbfs && internal_conf->match_allocations) {
RTE_LOG(ERR, EAL, "Option --"OPT_NO_HUGE" is not compatible "
"with --"OPT_MATCH_ALLOCATIONS"\n");
return -1;
}
- if (internal_cfg->legacy_mem && internal_cfg->memory == 0) {
+ if (internal_conf->legacy_mem && internal_conf->memory == 0) {
RTE_LOG(NOTICE, EAL, "Static memory layout is selected, "
"amount of reserved memory can be adjusted with "
"-m or --"OPT_SOCKET_MEM"\n");
}
+ if (internal_conf->map_perfect || internal_conf->pri_and_sec) {
+ if (!internal_conf->legacy_mem || internal_conf->in_memory || internal_conf->no_hugetlbfs) {
+ RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" or "OPT_IN_MEMORY" or "OPT_NO_HUGE" "
+ "is not compatible with --"OPT_MAP_PERFECT" and "OPT_PRI_AND_SEC"\n");
+ return -1;
+ }
+ }
+
return 0;
}
diff --git a/lib/eal/common/eal_filesystem.h b/lib/eal/common/eal_filesystem.h
index 5d21f07c20..719678772f 100644
--- a/lib/eal/common/eal_filesystem.h
+++ b/lib/eal/common/eal_filesystem.h
@@ -23,7 +23,7 @@
/* sets up platform-specific runtime data dir */
int
-eal_create_runtime_dir(void);
+eal_create_runtime_dir(const int sec_idx);
int
eal_clean_runtime_dir(void);
@@ -32,17 +32,32 @@ eal_clean_runtime_dir(void);
const char *
eal_get_hugefile_prefix(void);
+const char *
+eal_sec_get_hugefile_prefix(const int sec_idx);
+
#define RUNTIME_CONFIG_FNAME "config"
static inline const char *
-eal_runtime_config_path(void)
+__eal_runtime_config_path(const char *runtime_dir)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(),
+ snprintf(buffer, sizeof(buffer), "%s/%s", runtime_dir,
RUNTIME_CONFIG_FNAME);
return buffer;
}
+static inline const char *
+eal_runtime_config_path(void)
+{
+ return __eal_runtime_config_path(rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_runtime_config_path(const char *runtime_dir)
+{
+ return __eal_runtime_config_path(runtime_dir);
+}
+
/** Path of primary/secondary communication unix socket file. */
#define MP_SOCKET_FNAME "mp_socket"
static inline const char *
@@ -57,12 +72,29 @@ eal_mp_socket_path(void)
#define FBARRAY_NAME_FMT "%s/fbarray_%s"
static inline const char *
-eal_get_fbarray_path(char *buffer, size_t buflen, const char *name) {
- snprintf(buffer, buflen, FBARRAY_NAME_FMT, rte_eal_get_runtime_dir(),
+__eal_get_fbarray_path(char *buffer, size_t buflen, const char *name,
+ const char *runtime_dir)
+{
+ snprintf(buffer, buflen, FBARRAY_NAME_FMT, runtime_dir,
name);
return buffer;
}
+static inline const char *
+eal_get_fbarray_path(char *buffer, size_t buflen, const char *name)
+{
+ return __eal_get_fbarray_path(buffer, buflen, name,
+ rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_get_fbarray_path(char *buffer, size_t buflen,
+ const char *name, const char *runtime_dir)
+{
+ return __eal_get_fbarray_path(buffer, buflen, name,
+ runtime_dir);
+}
+
/** Path of hugepage info file. */
#define HUGEPAGE_INFO_FNAME "hugepage_info"
static inline const char *
@@ -78,15 +110,27 @@ eal_hugepage_info_path(void)
/** Path of hugepage data file. */
#define HUGEPAGE_DATA_FNAME "hugepage_data"
static inline const char *
-eal_hugepage_data_path(void)
+__eal_hugepage_data_path(const char *runtime_dir)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(),
+ snprintf(buffer, sizeof(buffer), "%s/%s", runtime_dir,
HUGEPAGE_DATA_FNAME);
return buffer;
}
+static inline const char *
+eal_hugepage_data_path(void)
+{
+ return __eal_hugepage_data_path(rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_hugepage_data_path(const char *runtime_dir)
+{
+ return __eal_hugepage_data_path(runtime_dir);
+}
+
/** String format for hugepage map files. */
#define HUGEFILE_FMT "%s/%smap_%d"
static inline const char *
diff --git a/lib/eal/common/eal_internal_cfg.h b/lib/eal/common/eal_internal_cfg.h
index d6c0470eb8..8c326f2f87 100644
--- a/lib/eal/common/eal_internal_cfg.h
+++ b/lib/eal/common/eal_internal_cfg.h
@@ -94,8 +94,10 @@ struct internal_config {
unsigned int no_telemetry; /**< true to disable Telemetry */
struct simd_bitwidth max_simd_bitwidth;
/**< max simd bitwidth path to use */
+ volatile unsigned pri_and_sec;
+ volatile unsigned map_perfect;
};
-void eal_reset_internal_config(struct internal_config *internal_cfg);
+void eal_reset_internal_config(struct internal_config *internal_conf);
#endif /* EAL_INTERNAL_CFG_H */
diff --git a/lib/eal/common/eal_memalloc.h b/lib/eal/common/eal_memalloc.h
index ebc3a6f6c1..19ccee7891 100644
--- a/lib/eal/common/eal_memalloc.h
+++ b/lib/eal/common/eal_memalloc.h
@@ -83,6 +83,10 @@ eal_memalloc_get_seg_fd(int list_idx, int seg_idx);
int
eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd);
+int
+eal_sec_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const int switch_pri_and_sec, const int sec_idx);
+
/* returns 0 or -errno */
int
eal_memalloc_set_seg_list_fd(int list_idx, int fd);
@@ -96,4 +100,7 @@ eal_memalloc_init(void);
int
eal_memalloc_cleanup(void);
+int
+eal_sec_memalloc_destroy(const int sec_idx);
+
#endif /* EAL_MEMALLOC_H */
diff --git a/lib/eal/common/eal_options.h b/lib/eal/common/eal_options.h
index 8e4f7202a2..95625c4002 100644
--- a/lib/eal/common/eal_options.h
+++ b/lib/eal/common/eal_options.h
@@ -87,6 +87,10 @@ enum {
OPT_NO_TELEMETRY_NUM,
#define OPT_FORCE_MAX_SIMD_BITWIDTH "force-max-simd-bitwidth"
OPT_FORCE_MAX_SIMD_BITWIDTH_NUM,
+#define OPT_PRI_AND_SEC "pri-and-sec"
+ OPT_PRI_AND_SEC_NUM,
+#define OPT_MAP_PERFECT "map-perfect"
+ OPT_MAP_PERFECT_NUM,
OPT_LONG_MAX_NUM
};
@@ -97,9 +101,10 @@ extern const struct option eal_long_options[];
int eal_parse_common_option(int opt, const char *argv,
struct internal_config *conf);
int eal_option_device_parse(void);
-int eal_adjust_config(struct internal_config *internal_cfg);
-int eal_cleanup_config(struct internal_config *internal_cfg);
-int eal_check_common_options(struct internal_config *internal_cfg);
+int eal_adjust_config(struct internal_config *internal_conf);
+int eal_sec_adjust_config(struct internal_config *internal_conf);
+int eal_cleanup_config(struct internal_config *internal_conf);
+int eal_check_common_options(struct internal_config *internal_conf, struct rte_config *cfg);
void eal_common_usage(void);
enum rte_proc_type_t eal_proc_type_detect(void);
int eal_plugins_init(void);
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 36bcc0b5a4..ac8af18773 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -103,7 +103,8 @@ int rte_eal_cpu_init(void);
* @return
* 0 on success, negative on error
*/
-int rte_eal_memseg_init(void);
+//int rte_eal_memseg_init(void);
+int rte_eal_memseg_init(const int switch_pri_and_sec, const int sec_idx);
/**
* Map memory
@@ -117,6 +118,9 @@ int rte_eal_memseg_init(void);
*/
int rte_eal_memory_init(void);
+int rte_eal_sec_memory_init(const int sec_idx);
+int rte_eal_sec_memory_cleanup(const int sec_idx);
+
/**
* Configure timers
*
@@ -413,7 +417,8 @@ int rte_eal_hugepage_init(void);
*
* This function is private to the EAL.
*/
-int rte_eal_hugepage_attach(void);
+//int rte_eal_hugepage_attach(void);
+int rte_eal_hugepage_attach(const int switch_pri_and_sec, const int sec_idx);
/**
* Detaches all memory mappings from a process.
@@ -689,6 +694,9 @@ eal_mem_set_dump(void *virt, size_t size, bool dump);
int
eal_set_runtime_dir(char *run_dir, size_t size);
+int
+eal_sec_set_runtime_dir(char *run_dir, size_t size, const int sec_idx);
+
/**
* Get the internal configuration structure.
*
@@ -738,4 +746,19 @@ int eal_asprintf(char **buffer, const char *format, ...);
eal_asprintf(buffer, format, ##__VA_ARGS__)
#endif
+
+/****** APIs for libnet ******/
+#include <rte_memory.h>
+
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg);
+
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg);
+
+int
+rte_sec_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg,
+ struct rte_config *rte_cfg);
+
#endif /* _EAL_PRIVATE_H_ */
diff --git a/lib/eal/include/rte_eal.h b/lib/eal/include/rte_eal.h
index 5a34a6acd9..c3259a4af3 100644
--- a/lib/eal/include/rte_eal.h
+++ b/lib/eal/include/rte_eal.h
@@ -472,9 +472,17 @@ rte_eal_mbuf_user_pool_ops(void);
* @return
* The runtime directory path of DPDK
*/
-const char *
+char *
rte_eal_get_runtime_dir(void);
+/****** APIs for libnet ******/
+char *rte_eal_sec_get_runtime_dir(const int sec_idx);
+struct rte_config *rte_eal_sec_get_configuration(const int sec_idx);
+struct internal_config *rte_eal_sec_get_internal_config(const int sec_idx);
+
+int rte_eal_sec_attach(int argc, char **argv);
+int rte_eal_sec_detach(const char *file_prefix, int length);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/eal/include/rte_fbarray.h b/lib/eal/include/rte_fbarray.h
index c64868711e..e35a0cc0b4 100644
--- a/lib/eal/include/rte_fbarray.h
+++ b/lib/eal/include/rte_fbarray.h
@@ -99,6 +99,10 @@ rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
int
rte_fbarray_attach(struct rte_fbarray *arr);
+int
+rte_sec_fbarray_attach(struct rte_fbarray *arr,
+ const int switch_pri_and_sec, const int sec_idx);
+
/**
* Deallocate resources for an already allocated and correctly set up
@@ -120,6 +124,8 @@ rte_fbarray_attach(struct rte_fbarray *arr);
int
rte_fbarray_destroy(struct rte_fbarray *arr);
+int
+rte_sec_fbarray_destroy(struct rte_fbarray *arr, const int sec_idx);
/**
* Deallocate resources for an already allocated and correctly set up
diff --git a/lib/eal/include/rte_memory.h b/lib/eal/include/rte_memory.h
index 6d018629ae..bf4c6098e3 100644
--- a/lib/eal/include/rte_memory.h
+++ b/lib/eal/include/rte_memory.h
@@ -143,7 +143,12 @@ rte_mem_iova2virt(rte_iova_t iova);
*/
struct rte_memseg *
rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);
-
+/*
+__rte_experimental
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg);
+*/
/**
* Get memseg list corresponding to virtual memory address.
*
@@ -154,7 +159,11 @@ rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);
*/
struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *virt);
-
+/*
+__rte_experimental
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg);
+*/
/**
* Memseg walk function prototype.
*
@@ -268,7 +277,12 @@ rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg);
*/
int
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg);
-
+/*
+__rte_experimental
+int
+rte_sec_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg,
+ struct rte_config *rte_cfg);
+*/
/**
* Walk each VA-contiguous area without performing any locking.
*
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index 47c2186bee..a3afa80d99 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -88,8 +88,10 @@ int rte_cycles_vmware_tsc_map;
static const char *default_runtime_dir = "/var/run";
+static unsigned int sec_count = 0;
+
int
-eal_create_runtime_dir(void)
+eal_create_runtime_dir(const int sec_idx)
{
const char *directory = default_runtime_dir;
const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
@@ -113,8 +115,9 @@ eal_create_runtime_dir(void)
}
/* create prefix-specific subdirectory under DPDK runtime dir */
- ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
- tmp, eal_get_hugefile_prefix());
+ const char *prefix = (sec_idx < 0) ? eal_get_hugefile_prefix() :
+ eal_sec_get_hugefile_prefix(sec_idx);
+ ret = snprintf(run_dir, sizeof(run_dir), "%s/%s", tmp, prefix);
if (ret < 0 || ret == sizeof(run_dir)) {
RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
return -1;
@@ -137,7 +140,9 @@ eal_create_runtime_dir(void)
return -1;
}
- if (eal_set_runtime_dir(run_dir, sizeof(run_dir)))
+ ret = (sec_idx < 0) ? eal_set_runtime_dir(run_dir, sizeof(run_dir)) :
+ eal_sec_set_runtime_dir(run_dir, sizeof(run_dir), sec_idx);
+ if (ret)
return -1;
return 0;
@@ -355,21 +360,22 @@ rte_eal_config_create(void)
/* attach to an existing shared memory config */
static int
-rte_eal_config_attach(void)
+__rte_eal_config_attach(const int mmap_flags, int *mem_cfg_fd,
+ const char *runtime_dir,
+ const struct internal_config *internal_conf,
+ struct rte_config *rte_cfg)
{
- struct rte_config *config = rte_eal_get_configuration();
struct rte_mem_config *mem_config;
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ int mcfg_fd = *mem_cfg_fd;
- const char *pathname = eal_runtime_config_path();
+ const char *pathname = eal_sec_runtime_config_path(runtime_dir);
if (internal_conf->no_shconf)
return 0;
- if (mem_cfg_fd < 0){
- mem_cfg_fd = open(pathname, O_RDWR);
- if (mem_cfg_fd < 0) {
+ if (mcfg_fd < 0){
+ mcfg_fd = open(pathname, O_RDWR);
+ if (mcfg_fd < 0) {
RTE_LOG(ERR, EAL, "Cannot open '%s' for rte_mem_config\n",
pathname);
return -1;
@@ -378,20 +384,32 @@ rte_eal_config_attach(void)
/* map it as read-only first */
mem_config = (struct rte_mem_config *) mmap(NULL, sizeof(*mem_config),
- PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
+ mmap_flags, MAP_SHARED, mcfg_fd, 0);
if (mem_config == MAP_FAILED) {
- close(mem_cfg_fd);
- mem_cfg_fd = -1;
+ close(mcfg_fd);
+ mcfg_fd = -1;
RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config! error %i (%s)\n",
errno, strerror(errno));
return -1;
}
- config->mem_config = mem_config;
+ rte_cfg->mem_config = mem_config;
+ *mem_cfg_fd = mcfg_fd;
return 0;
}
+static int
+rte_eal_config_attach(void)
+{
+ const struct internal_config *internal_conf = eal_get_internal_configuration();
+
+ return __rte_eal_config_attach(PROT_READ, &mem_cfg_fd,
+ rte_eal_get_runtime_dir(), internal_conf,
+ rte_eal_get_configuration());
+}
+
+
/* reattach the shared config at exact memory location primary process has it */
static int
rte_eal_config_reattach(void)
@@ -508,6 +526,45 @@ rte_config_init(void)
return 0;
}
+static void
+rte_sec_config_init(const int sec_idx)
+{
+ int mem_cfg_fd = -1;
+ int mmap_flags = PROT_READ | PROT_WRITE;
+
+ struct rte_config *rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ struct internal_config *internal_conf = rte_eal_sec_get_internal_config(sec_idx);
+
+ rte_cfg->process_type = internal_conf->process_type;
+
+ __rte_eal_config_attach(mmap_flags, &mem_cfg_fd,
+ rte_eal_sec_get_runtime_dir(sec_idx),
+ internal_conf, rte_cfg);
+
+ close(mem_cfg_fd);
+}
+
+static int
+eal_sec_config_cleanup(const int sec_idx)
+{
+ int ret;
+ struct rte_config *lc_rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ struct internal_config *lc_internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
+ char *lc_runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+
+ ret = munmap(lc_rte_cfg->mem_config, sizeof(*lc_rte_cfg->mem_config));
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Failed to unmap config memory!\n");
+ return -1;
+ }
+
+ memset(lc_rte_cfg, 0, sizeof(*lc_rte_cfg));
+ memset(lc_internal_cfg, 0, sizeof(*lc_internal_cfg));
+ memset(lc_runtime_dir, 0, PATH_MAX);
+
+ return 0;
+}
+
/* Unlocks hugepage directories that were locked by eal_hugepage_info_init */
static void
eal_hugedirs_unlock(void)
@@ -548,6 +605,7 @@ eal_usage(const char *prgname)
" --"OPT_LEGACY_MEM" Legacy memory mode (no dynamic allocation, contiguous segments)\n"
" --"OPT_SINGLE_FILE_SEGMENTS" Put all hugepage memory in single files\n"
" --"OPT_MATCH_ALLOCATIONS" Free hugepages exactly as allocated\n"
+ " --"OPT_MAP_PERFECT" Map virtual addresses according to configured hugepage size\n"
"\n");
/* Allow the application to print its usage message too if hook is set */
if (hook) {
@@ -678,7 +736,9 @@ eal_log_level_parse(int argc, char **argv)
/* Parse the argument given in the command line of the application */
static int
-eal_parse_args(int argc, char **argv)
+__eal_parse_args(int argc, char **argv, const int sec_idx,
+ struct internal_config *internal_conf,
+ struct rte_config *rte_cfg)
{
int opt, ret;
char **argvopt;
@@ -687,8 +747,6 @@ eal_parse_args(int argc, char **argv)
const int old_optind = optind;
const int old_optopt = optopt;
char * const old_optarg = optarg;
- struct internal_config *internal_conf =
- eal_get_internal_configuration();
argvopt = argv;
optind = 1;
@@ -816,6 +874,9 @@ eal_parse_args(int argc, char **argv)
case OPT_MATCH_ALLOCATIONS_NUM:
internal_conf->match_allocations = 1;
break;
+ case OPT_MAP_PERFECT_NUM:
+ internal_conf->map_perfect = 1;
+ break;
default:
if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
@@ -837,7 +898,7 @@ eal_parse_args(int argc, char **argv)
}
/* create runtime data directory. In no_shconf mode, skip any errors */
- if (eal_create_runtime_dir() < 0) {
+ if (eal_create_runtime_dir(sec_idx) < 0) {
if (internal_conf->no_shconf == 0) {
RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
ret = -1;
@@ -846,13 +907,18 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(WARNING, EAL, "No DPDK runtime directory created\n");
}
- if (eal_adjust_config(internal_conf) != 0) {
- ret = -1;
- goto out;
+ if (!internal_conf->pri_and_sec) {
+ ret = eal_adjust_config(internal_conf);
+ if (ret != 0)
+ goto out;
+ } else {
+ ret = eal_sec_adjust_config(internal_conf);
+ if (ret != 0)
+ goto out;
}
/* sanity checks */
- if (eal_check_common_options(internal_conf) != 0) {
+ if (eal_check_common_options(internal_conf, rte_cfg) != 0) {
eal_usage(prgname);
ret = -1;
goto out;
@@ -871,6 +937,24 @@ eal_parse_args(int argc, char **argv)
return ret;
}
+static int
+eal_parse_args(int argc, char **argv)
+{
+ struct internal_config *internal_conf = eal_get_internal_configuration();
+
+ return __eal_parse_args(argc, argv, -1,
+ internal_conf,
+ rte_eal_get_configuration());
+}
+
+static int
+eal_sec_parse_args(int argc, char **argv, const int sec_idx)
+{
+ return __eal_parse_args(argc, argv, sec_idx,
+ rte_eal_sec_get_internal_config(sec_idx),
+ rte_eal_sec_get_configuration(sec_idx));
+}
+
static int
check_socket(const struct rte_memseg_list *msl, void *arg)
{
@@ -1437,3 +1521,101 @@ rte_eal_check_module(const char *module_name)
/* Module has been found */
return 1;
}
+
+
+/****** APIs for libnet ******/
+int
+rte_eal_sec_attach(int argc, char **argv)
+{
+ int ret;
+ int sec_idx = -1;
+ struct internal_config *lc_internal_conf = NULL;
+
+ if (sec_count >= RTE_MAX_SECONDARY) {
+ RTE_LOG(ERR, EAL, "Too many secondary processes: %u.\n", sec_count);
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ lc_internal_conf = rte_eal_sec_get_internal_config(i);
+ if (lc_internal_conf->pri_and_sec == 0) {
+ lc_internal_conf->pri_and_sec = 1;
+ sec_idx = i;
+ break;
+ }
+ }
+
+ eal_reset_internal_config(lc_internal_conf);
+
+ ret = eal_sec_parse_args(argc, argv, sec_idx);
+ if (ret < 0) {
+ if (ret == -EALREADY) {
+ RTE_LOG(ERR, EAL, "file_prefix %s already called initialization.\n",
+ lc_internal_conf->hugefile_prefix);
+ rte_errno = EALREADY;
+ } else {
+ RTE_LOG(ERR, EAL, "Invalid 'command line' arguments.\n");
+ rte_errno = EINVAL;
+ }
+ return -1;
+ }
+
+ rte_sec_config_init(sec_idx);
+
+ ret = rte_eal_sec_memory_init(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot init memory\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ sec_count++;
+ return 0;
+}
+
+int
+rte_eal_sec_detach(const char *file_prefix, int length)
+{
+ int ret;
+ int sec_idx = -1;
+ struct internal_config *lc_internal_conf = NULL;
+
+ if (!file_prefix || length <= 0) {
+ RTE_LOG(ERR, EAL, "Invalid 'file_prefix or length' arguments.\n");
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ lc_internal_conf = rte_eal_sec_get_internal_config(i);
+ if (lc_internal_conf->pri_and_sec == 0)
+ continue;
+ if (!strncmp(lc_internal_conf->hugefile_prefix, file_prefix, length)) {
+ sec_idx = i;
+ break;
+ }
+ }
+ if (sec_idx == -1) {
+ RTE_LOG(ERR, EAL, "Cannot find file_prefix %s.\n", file_prefix);
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ ret = rte_eal_sec_memory_cleanup(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot cleanup memory\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ ret = eal_sec_config_cleanup(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot cleanup hugepage sharefile.\n");
+ rte_errno = EACCES;
+ return -1;
+ }
+
+ sec_count--;
+ return 0;
+}
diff --git a/lib/eal/linux/eal_hugepage_info.c b/lib/eal/linux/eal_hugepage_info.c
index 9fb0e968db..41acf180ee 100644
--- a/lib/eal/linux/eal_hugepage_info.c
+++ b/lib/eal/linux/eal_hugepage_info.c
@@ -389,7 +389,7 @@ calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)
*/
total_pages = 0;
/* we also don't want to do this for legacy init */
- if (!internal_conf->legacy_mem)
+ if (!internal_conf->legacy_mem || internal_conf->map_perfect)
for (i = 0; i < rte_socket_count(); i++) {
int socket = rte_socket_id_by_idx(i);
unsigned int num_pages =
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index fc354f4a17..dac9098c8c 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -39,6 +39,7 @@
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
+#include <rte_eal_paging.h>
#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
@@ -95,12 +96,14 @@ static int fallocate_supported = -1; /* unknown */
* they will be initialized at startup, and filled as we allocate/deallocate
* segments.
*/
-static struct {
+struct fd_list{
int *fds; /**< dynamically allocated array of segment lock fd's */
int memseg_list_fd; /**< memseg list fd */
int len; /**< total length of the array */
int count; /**< entries used in an array */
-} fd_list[RTE_MAX_MEMSEG_LISTS];
+};
+static struct fd_list fd_list[RTE_MAX_MEMSEG_LISTS];
+static struct fd_list sec_fd_list[RTE_MAX_SECONDARY][RTE_MAX_MEMSEG_LISTS];
/** local copy of a memory map, used to synchronize memory hotplug in MP */
static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
@@ -1462,7 +1465,7 @@ secondary_msl_destroy_walk(const struct rte_memseg_list *msl,
}
static int
-alloc_list(int list_idx, int len)
+__alloc_list(int list_idx, int len, struct fd_list *fd_ls)
{
int *data;
int i;
@@ -1470,7 +1473,7 @@ alloc_list(int list_idx, int len)
eal_get_internal_configuration();
/* single-file segments mode does not need fd list */
- if (!internal_conf->single_file_segments) {
+ if (!internal_conf->single_file_segments) { // sec todo
/* ensure we have space to store fd per each possible segment */
data = malloc(sizeof(int) * len);
if (data == NULL) {
@@ -1480,24 +1483,36 @@ alloc_list(int list_idx, int len)
/* set all fd's as invalid */
for (i = 0; i < len; i++)
data[i] = -1;
- fd_list[list_idx].fds = data;
- fd_list[list_idx].len = len;
+ fd_ls[list_idx].fds = data;
+ fd_ls[list_idx].len = len;
} else {
- fd_list[list_idx].fds = NULL;
- fd_list[list_idx].len = 0;
+ fd_ls[list_idx].fds = NULL;
+ fd_ls[list_idx].len = 0;
}
- fd_list[list_idx].count = 0;
- fd_list[list_idx].memseg_list_fd = -1;
+ fd_ls[list_idx].count = 0;
+ fd_ls[list_idx].memseg_list_fd = -1;
return 0;
}
+static int
+alloc_list(int list_idx, int len)
+{
+ return __alloc_list(list_idx, len, fd_list);
+}
+
+static int
+sec_alloc_list(int list_idx, int len, struct fd_list *fd_ls)
+{
+ return __alloc_list(list_idx, len, fd_ls);
+}
+
static int
destroy_list(int list_idx)
{
const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ eal_get_internal_configuration();
/* single-file segments mode does not need fd list */
if (!internal_conf->single_file_segments) {
@@ -1552,29 +1567,54 @@ fd_list_destroy_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
return destroy_list(msl_idx);
}
-int
-eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
+static int
+__eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const struct rte_config *rte_cfg, struct fd_list *fd_ls)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
-
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
+ const struct internal_config *internal_conf = eal_get_internal_configuration();
+
/* single file segments mode doesn't support individual segment fd's */
- if (internal_conf->single_file_segments)
+ if (internal_conf->single_file_segments) // sec todo
return -ENOTSUP;
/* if list is not allocated, allocate it */
- if (fd_list[list_idx].len == 0) {
+ if (fd_ls[list_idx].len == 0) {
int len = mcfg->memsegs[list_idx].memseg_arr.len;
- if (alloc_list(list_idx, len) < 0)
+ if (sec_alloc_list(list_idx, len, fd_ls) < 0)
return -ENOMEM;
}
- fd_list[list_idx].fds[seg_idx] = fd;
+ fd_ls[list_idx].fds[seg_idx] = fd;
return 0;
}
+int
+eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
+{
+ return __eal_memalloc_set_seg_fd(list_idx, seg_idx, fd,
+ rte_eal_get_configuration(), fd_list);
+}
+
+int
+eal_sec_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const int switch_pri_and_sec, const int sec_idx)
+{
+ struct rte_config *rte_cfg = NULL;
+ struct fd_list *fd_ls = NULL;
+
+ if (!switch_pri_and_sec) {
+ rte_cfg = rte_eal_get_configuration();
+ fd_ls = &fd_list[0];
+ } else {
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ fd_ls = &sec_fd_list[sec_idx][0];
+ }
+
+ return __eal_memalloc_set_seg_fd(list_idx, seg_idx, fd, rte_cfg, fd_ls);
+}
+
int
eal_memalloc_set_seg_list_fd(int list_idx, int fd)
{
@@ -1749,3 +1789,49 @@ eal_memalloc_init(void)
return -1;
return 0;
}
+
+static int
+fd_sec_list_destroy_walk(const struct rte_memseg_list *msl, const int sec_idx)
+{
+ struct rte_mem_config *mcfg = rte_eal_sec_get_configuration(sec_idx)->mem_config;
+ struct fd_list *fd_ls = sec_fd_list[sec_idx];
+ int list_idx;
+
+ list_idx = msl - mcfg->memsegs;
+ if (fd_ls[list_idx].len != 0) {
+ free(fd_ls[list_idx].fds);
+ /* The fds were already closed in eal_legacy_hugepage_attach(). */
+ //close(fd_ls[list_idx].fds[seg_idx]);
+ }
+ memset(&fd_ls[list_idx], 0, sizeof(fd_ls[list_idx]));
+
+ return 0;
+}
+
+int
+eal_sec_memalloc_destroy(const int sec_idx)
+{
+ struct rte_mem_config *mcfg = rte_eal_sec_get_configuration(sec_idx)->mem_config;
+ int i, ret = 0;
+
+ for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+ struct rte_memseg_list *msl = &mcfg->memsegs[i];
+
+ if (msl->base_va == NULL)
+ continue;
+
+ if (fd_sec_list_destroy_walk(msl, sec_idx)) {
+ RTE_LOG(ERR, EAL, "Failed to clear secondary fd_list.\n");
+ return -1;
+ }
+
+ ret = rte_sec_fbarray_destroy(&msl->memseg_arr, sec_idx);
+ if (ret)
+ return ret;
+
+ rte_mem_unmap(msl->base_va, msl->len);
+ memset(msl, 0, sizeof(*msl));
+ }
+
+ return 0;
+}
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 03a4f2dd2d..4d78a47e0a 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -992,6 +992,7 @@ static int
remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
{
int cur_page, seg_start_page, new_memseg, ret;
+ const struct internal_config *internal_conf = eal_get_internal_configuration();
seg_start_page = 0;
for (cur_page = 0; cur_page < n_pages; cur_page++) {
@@ -1017,10 +1018,10 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
* address to lower address. Here, physical addresses are in
* descending order.
*/
- else if ((prev->physaddr - cur->physaddr) != cur->size)
+ else if (!internal_conf->map_perfect && (prev->physaddr - cur->physaddr) != cur->size)
new_memseg = 1;
#else
- else if ((cur->physaddr - prev->physaddr) != cur->size)
+ else if (!internal_conf->map_perfect && (cur->physaddr - prev->physaddr) != cur->size)
new_memseg = 1;
#endif
@@ -1235,6 +1236,24 @@ eal_legacy_hugepage_init(void)
for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
used_hp[i].hugepage_sz = internal_conf->hugepage_info[i].hugepage_sz;
+
+ if (internal_conf->map_perfect) {
+ int sys_num_pages = 0;
+ int need_num_pages = 0;
+ struct rte_memseg_list *msl;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
+ sys_num_pages += internal_conf->hugepage_info[i].num_pages[j];
+ }
+
+ for (j = 0; j < RTE_MAX_MEMSEG_LISTS; j++) {
+ msl = &mcfg->memsegs[j];
+ if (internal_conf->hugepage_info[i].hugepage_sz == msl->page_sz)
+ need_num_pages += msl->memseg_arr.len;
+ }
+
+ internal_conf->hugepage_info[i].num_pages[0] = RTE_MIN(sys_num_pages, need_num_pages);
+ }
nr_hugepages += internal_conf->hugepage_info[i].num_pages[0];
}
@@ -1316,8 +1335,13 @@ eal_legacy_hugepage_init(void)
goto fail;
}
- qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
- sizeof(struct hugepage_file), cmp_physaddr);
+ /* continuous physical memory does not bring performance improvements,
+ * so no sorting is performed for quick startup.
+ */
+ if (!internal_conf->map_perfect) {
+ qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
+ sizeof(struct hugepage_file), cmp_physaddr);
+ }
/* we have processed a num of hugepages of this size, so inc offset */
hp_offset += hpi->num_pages[0];
@@ -1502,9 +1526,9 @@ getFileSize(int fd)
* in order to form a contiguous block in the virtual memory space
*/
static int
-eal_legacy_hugepage_attach(void)
+eal_legacy_hugepage_attach(const int switch_pri_and_sec, const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = NULL;
struct hugepage_file *hp = NULL;
unsigned int num_hp = 0;
unsigned int i = 0;
@@ -1512,6 +1536,22 @@ eal_legacy_hugepage_attach(void)
off_t size = 0;
int fd, fd_hugepage = -1;
+ struct rte_config *rte_cfg = NULL;
+ struct internal_config *internal_conf = NULL;
+ char *runtime_dir = NULL;
+
+ if (!switch_pri_and_sec) {
+ runtime_dir = rte_eal_get_runtime_dir();
+ rte_cfg = rte_eal_get_configuration();
+ internal_conf = eal_get_internal_configuration();
+ } else {
+ runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ internal_conf = rte_eal_sec_get_internal_config(sec_idx);
+ }
+
+ mcfg = rte_cfg->mem_config;
+
if (aslr_enabled() > 0) {
RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
"(ASLR) is enabled in the kernel.\n");
@@ -1519,10 +1559,10 @@ eal_legacy_hugepage_attach(void)
"into secondary processes\n");
}
- fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
+ fd_hugepage = open(eal_sec_hugepage_data_path(runtime_dir), O_RDONLY);
if (fd_hugepage < 0) {
RTE_LOG(ERR, EAL, "Could not open %s\n",
- eal_hugepage_data_path());
+ eal_sec_hugepage_data_path(runtime_dir));
goto error;
}
@@ -1530,7 +1570,7 @@ eal_legacy_hugepage_attach(void)
hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
if (hp == MAP_FAILED) {
RTE_LOG(ERR, EAL, "Could not mmap %s\n",
- eal_hugepage_data_path());
+ eal_sec_hugepage_data_path(runtime_dir));
goto error;
}
@@ -1577,13 +1617,13 @@ eal_legacy_hugepage_attach(void)
}
/* find segment data */
- msl = rte_mem_virt2memseg_list(map_addr);
+ msl = rte_sec_mem_virt2memseg_list(map_addr, rte_cfg);
if (msl == NULL) {
RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg list\n",
__func__);
goto mmap_error;
}
- ms = rte_mem_virt2memseg(map_addr, msl);
+ ms = rte_sec_mem_virt2memseg(map_addr, msl, rte_cfg);
if (ms == NULL) {
RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg\n",
__func__);
@@ -1598,8 +1638,16 @@ eal_legacy_hugepage_attach(void)
goto mmap_error;
}
+ /* No hugefile lock is required in PRI_AND_SEC mode, close it
+ * to avoid opening too much fd.
+ */
+ if (internal_conf->pri_and_sec) {
+ close(fd);
+ fd = -1;
+ }
+
/* store segment fd internally */
- if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
+ if (eal_sec_memalloc_set_seg_fd(msl_idx, ms_idx, fd, switch_pri_and_sec, sec_idx) < 0)
RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
rte_strerror(rte_errno));
}
@@ -1648,13 +1696,17 @@ rte_eal_hugepage_init(void)
}
int
-rte_eal_hugepage_attach(void)
+rte_eal_hugepage_attach(const int switch_pri_and_sec, const int sec_idx)
{
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ struct internal_config *internal_conf;
+
+ if (!switch_pri_and_sec)
+ internal_conf = eal_get_internal_configuration();
+ else
+ internal_conf = rte_eal_sec_get_internal_config(sec_idx);
return internal_conf->legacy_mem ?
- eal_legacy_hugepage_attach() :
+ eal_legacy_hugepage_attach(switch_pri_and_sec, sec_idx) :
eal_hugepage_attach();
}
@@ -1873,9 +1925,10 @@ memseg_primary_init(void)
}
static int
-memseg_secondary_init(void)
+memseg_secondary_init(struct rte_config *rte_cfg,
+ const int switch_pri_and_sec, const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
int msl_idx = 0;
struct rte_memseg_list *msl;
@@ -1887,7 +1940,7 @@ memseg_secondary_init(void)
if (msl->memseg_arr.len == 0)
continue;
- if (rte_fbarray_attach(&msl->memseg_arr)) {
+ if (rte_sec_fbarray_attach(&msl->memseg_arr, switch_pri_and_sec, sec_idx)) {
RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
return -1;
}
@@ -1903,11 +1956,18 @@ memseg_secondary_init(void)
}
int
-rte_eal_memseg_init(void)
+rte_eal_memseg_init(const int switch_pri_and_sec, const int sec_idx)
{
/* increase rlimit to maximum */
struct rlimit lim;
+ struct rte_config *rte_cfg = NULL;
+ if (!switch_pri_and_sec) {
+ rte_cfg = rte_eal_get_configuration();
+ } else {
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ }
+
#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
const struct internal_config *internal_conf =
eal_get_internal_configuration();
@@ -1935,11 +1995,11 @@ rte_eal_memseg_init(void)
}
#endif
- return rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ return rte_cfg->process_type == RTE_PROC_PRIMARY ?
#ifndef RTE_ARCH_64
memseg_primary_init_32() :
#else
memseg_primary_init() :
#endif
- memseg_secondary_init();
+ memseg_secondary_init(rte_cfg, switch_pri_and_sec, sec_idx);
}
diff --git a/lib/ring/rte_ring.h b/lib/ring/rte_ring.h
index da17ed6d7c..ef18a2b39b 100644
--- a/lib/ring/rte_ring.h
+++ b/lib/ring/rte_ring.h
@@ -802,6 +802,81 @@ rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
n, available);
}
+/****** APIs for libnet ******/
+static __rte_always_inline unsigned
+rte_ring_cn_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t old_head = r->prod.tail;
+ rte_smp_rmb();
+
+ const uint32_t entries = r->cons.head - old_head;
+ if (n > entries) {
+ n = entries;
+ }
+ if (unlikely(n == 0)) {
+ return 0;
+ }
+
+ r->prod.head = old_head + n;
+ rte_smp_rmb();
+
+ __rte_ring_dequeue_elems(r, old_head, obj_table, sizeof(void *), n);
+ return n;
+}
+
+static __rte_always_inline void
+rte_ring_cn_enqueue(struct rte_ring *r)
+{
+ rte_smp_wmb();
+ r->prod.tail = r->prod.head;
+}
+
+static __rte_always_inline unsigned
+rte_ring_en_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t old_tail = r->cons.tail;
+ rte_smp_rmb();
+
+ const uint32_t entries = r->prod.tail - old_tail;
+ if (n > entries) {
+ n = entries;
+ }
+ if (unlikely(n == 0)) {
+ return 0;
+ }
+
+ const uint32_t new_tail = old_tail + n;
+ rte_smp_rmb();
+
+ __rte_ring_dequeue_elems(r, old_tail, obj_table, sizeof(void *), n);
+ rte_smp_rmb();
+
+ r->cons.tail = new_tail;
+ return n;
+}
+
+static __rte_always_inline unsigned
+rte_ring_en_enqueue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t capacity = r->capacity;
+ const uint32_t old_head = r->cons.head;
+ rte_smp_rmb();
+
+ const uint32_t entries = capacity + r->cons.tail - old_head;
+ if (n > entries) {
+ return 0;
+ }
+
+ const uint32_t new_head = old_head + n;
+ rte_smp_rmb();
+
+ __rte_ring_enqueue_elems(r, old_head, obj_table, sizeof(void *), n);
+ rte_smp_wmb();
+
+ r->cons.head = new_head;
+ return n;
+}
+
#ifdef __cplusplus
}
#endif
--
2.27.0
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。