1 Star 0 Fork 75

mengxuanzhang/dpdk

forked from src-openEuler/dpdk 
加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
0011-dpdk-add-support-for-gazellle.patch 56.07 KB
一键复制 编辑 原始数据 按行查看 历史
jinag12 提交于 2024-01-12 02:37 . add self-developed patches
[Extraction artifact removed: the source-viewer's line-number gutter (a run of the digits 1 through 1948 with no separators) was scraped into the page text here. It is display chrome from the Gitee file viewer, not part of the patch below.]
From cd9b1ceffcbfe0557e97d86c4f2a1e117039d3c0 Mon Sep 17 00:00:00 2001
From: jiangheng <jiangheng14@huawei.com>
Date: Thu, 11 Jan 2024 19:10:40 +0800
Subject: dpdk add support for gazelle
---
config/rte_config.h | 1 +
lib/eal/common/eal_common_config.c | 43 ++++-
lib/eal/common/eal_common_dynmem.c | 67 +++++++-
lib/eal/common/eal_common_fbarray.c | 115 ++++++++++++--
lib/eal/common/eal_common_memory.c | 82 ++++++++--
lib/eal/common/eal_common_memzone.c | 2 +-
lib/eal/common/eal_common_options.c | 61 ++++++--
lib/eal/common/eal_filesystem.h | 58 ++++++-
lib/eal/common/eal_internal_cfg.h | 2 +
lib/eal/common/eal_memalloc.h | 7 +
lib/eal/common/eal_options.h | 11 +-
lib/eal/common/eal_private.h | 29 +++-
lib/eal/include/rte_eal.h | 10 +-
lib/eal/include/rte_fbarray.h | 6 +
lib/eal/include/rte_memory.h | 20 ++-
lib/eal/linux/eal.c | 234 +++++++++++++++++++++++++---
lib/eal/linux/eal_hugepage_info.c | 2 +-
lib/eal/linux/eal_memalloc.c | 129 ++++++++++++---
lib/eal/linux/eal_memory.c | 104 ++++++++++---
lib/eal/unix/eal_filesystem.c | 11 +-
lib/eal/version.map | 2 +
lib/ring/rte_ring.h | 75 +++++++++
22 files changed, 938 insertions(+), 133 deletions(-)
diff --git a/config/rte_config.h b/config/rte_config.h
index da265d7..8fdadc6 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -35,6 +35,7 @@
#define RTE_MAX_MEM_MB_PER_LIST 32768
#define RTE_MAX_MEMSEG_PER_TYPE 32768
#define RTE_MAX_MEM_MB_PER_TYPE 65536
+#define RTE_MAX_SECONDARY 256
#define RTE_MAX_TAILQ 32
#define RTE_LOG_DP_LEVEL RTE_LOG_INFO
#define RTE_MAX_VFIO_CONTAINERS 64
diff --git a/lib/eal/common/eal_common_config.c b/lib/eal/common/eal_common_config.c
index 0daf0f3..a6421fb 100644
--- a/lib/eal/common/eal_common_config.c
+++ b/lib/eal/common/eal_common_config.c
@@ -21,16 +21,27 @@ static char runtime_dir[PATH_MAX];
/* internal configuration */
static struct internal_config internal_config;
-const char *
+/****** APIs for libnet ******/
+static char sec_runtime_dir[RTE_MAX_SECONDARY][PATH_MAX];
+static struct rte_config sec_rte_config[RTE_MAX_SECONDARY];
+static struct internal_config sec_internal_config[RTE_MAX_SECONDARY];
+
+char *
rte_eal_get_runtime_dir(void)
{
return runtime_dir;
}
-int
-eal_set_runtime_dir(const char *run_dir)
+char *
+rte_eal_sec_get_runtime_dir(const int sec_idx)
+{
+ return sec_runtime_dir[sec_idx];
+}
+
+static int
+set_runtime_dir(char *dst_dir, char *src_dir)
{
- if (strlcpy(runtime_dir, run_dir, PATH_MAX) >= PATH_MAX) {
+ if (strlcpy(dst_dir, src_dir, PATH_MAX) >= PATH_MAX) {
RTE_LOG(ERR, EAL, "Runtime directory string too long\n");
return -1;
}
@@ -38,6 +49,18 @@ eal_set_runtime_dir(const char *run_dir)
return 0;
}
+int
+eal_sec_set_runtime_dir(char *run_dir, const int sec_idx)
+{
+ return set_runtime_dir(sec_runtime_dir[sec_idx], run_dir);
+}
+
+int
+eal_set_runtime_dir(char *run_dir)
+{
+ return set_runtime_dir(runtime_dir, run_dir);
+}
+
/* Return a pointer to the configuration structure */
struct rte_config *
rte_eal_get_configuration(void)
@@ -45,6 +68,18 @@ rte_eal_get_configuration(void)
return &rte_config;
}
+struct rte_config *
+rte_eal_sec_get_configuration(const int sec_idx)
+{
+ return &sec_rte_config[sec_idx];
+}
+
+struct internal_config *
+rte_eal_sec_get_internal_config(const int sec_idx)
+{
+ return &sec_internal_config[sec_idx];
+}
+
/* Return a pointer to the internal configuration structure */
struct internal_config *
eal_get_internal_configuration(void)
diff --git a/lib/eal/common/eal_common_dynmem.c b/lib/eal/common/eal_common_dynmem.c
index 95da55d..89e2acd 100644
--- a/lib/eal/common/eal_common_dynmem.c
+++ b/lib/eal/common/eal_common_dynmem.c
@@ -17,6 +17,50 @@
/** @file Functions common to EALs that support dynamic memory allocation. */
+static int
+eal_sec_set_num_pages(struct internal_config *internal_conf,
+ struct hugepage_info *used_hp)
+{
+ int ret;
+ int hp_sz_idx;
+ uint64_t memory[RTE_MAX_NUMA_NODES];
+
+ if (!internal_conf || !used_hp) {
+ return -1;
+ }
+
+ for (hp_sz_idx = 0;
+ hp_sz_idx < (int) internal_conf->num_hugepage_sizes;
+ hp_sz_idx++) {
+ struct hugepage_info *hpi;
+ hpi = &internal_conf->hugepage_info[hp_sz_idx];
+ used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+ }
+
+ for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
+ memory[hp_sz_idx] = internal_conf->socket_mem[hp_sz_idx];
+
+ ret = eal_dynmem_calc_num_pages_per_socket(memory,
+ internal_conf->hugepage_info, used_hp,
+ internal_conf->num_hugepage_sizes);
+
+ return ret;
+}
+
+static int
+eal_sec_get_num_pages(const struct hugepage_info *used_hp,
+ uint64_t hugepage_sz, int socket)
+{
+ int hp_sz_idx;
+
+ for (hp_sz_idx = 0; hp_sz_idx < MAX_HUGEPAGE_SIZES; hp_sz_idx++) {
+ if (used_hp[hp_sz_idx].hugepage_sz == hugepage_sz)
+ return used_hp[hp_sz_idx].num_pages[socket];
+ }
+
+ return 0;
+}
+
int
eal_dynmem_memseg_lists_init(void)
{
@@ -30,6 +74,7 @@ eal_dynmem_memseg_lists_init(void)
uint64_t max_mem, max_mem_per_type;
unsigned int max_seglists_per_type;
unsigned int n_memtypes, cur_type;
+ struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
struct internal_config *internal_conf =
eal_get_internal_configuration();
@@ -37,6 +82,14 @@ eal_dynmem_memseg_lists_init(void)
if (internal_conf->no_hugetlbfs)
return 0;
+ if (internal_conf->map_perfect) {
+ memset(used_hp, 0, sizeof(used_hp));
+ ret = eal_sec_set_num_pages(internal_conf, used_hp);
+ if (ret == -1) {
+ RTE_LOG(ERR, EAL, "Cannot get num pages\n");
+ }
+ }
+
/*
* figuring out amount of memory we're going to have is a long and very
* involved process. the basic element we're operating with is a memory
@@ -132,6 +185,7 @@ eal_dynmem_memseg_lists_init(void)
struct memtype *type = &memtypes[cur_type];
uint64_t max_mem_per_list, pagesz;
int socket_id;
+ unsigned int need_n_segs, cur_n_segs;
pagesz = type->page_sz;
socket_id = type->socket_id;
@@ -175,8 +229,17 @@ eal_dynmem_memseg_lists_init(void)
"n_segs:%i socket_id:%i hugepage_sz:%" PRIu64 "\n",
n_seglists, n_segs, socket_id, pagesz);
+ if (internal_conf->map_perfect)
+ need_n_segs = eal_sec_get_num_pages(used_hp, pagesz, socket_id);
+ else
+ need_n_segs = n_segs;
+
/* create all segment lists */
- for (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) {
+ for (cur_seglist = 0; cur_seglist < n_seglists && need_n_segs > 0; cur_seglist++) {
+ cur_n_segs = RTE_MIN(need_n_segs, n_segs);
+ if (internal_conf->map_perfect)
+ need_n_segs -= cur_n_segs;
+
if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
RTE_LOG(ERR, EAL,
"No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n");
@@ -184,7 +247,7 @@ eal_dynmem_memseg_lists_init(void)
}
msl = &mcfg->memsegs[msl_idx++];
- if (eal_memseg_list_init(msl, pagesz, n_segs,
+ if (eal_memseg_list_init(msl, pagesz, cur_n_segs,
socket_id, cur_seglist, true))
goto out;
diff --git a/lib/eal/common/eal_common_fbarray.c b/lib/eal/common/eal_common_fbarray.c
index 2055bfa..0406865 100644
--- a/lib/eal/common/eal_common_fbarray.c
+++ b/lib/eal/common/eal_common_fbarray.c
@@ -8,6 +8,8 @@
#include <errno.h>
#include <string.h>
#include <unistd.h>
+#include <sys/file.h>
+#include <sys/mman.h>
#include <rte_common.h>
#include <rte_eal_paging.h>
@@ -827,8 +829,9 @@ rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
return -1;
}
-int
-rte_fbarray_attach(struct rte_fbarray *arr)
+static int
+__rte_fbarray_attach(struct rte_fbarray *arr, const char *runtime_dir,
+ const struct internal_config *internal_conf)
{
struct mem_area *ma = NULL, *tmp = NULL;
size_t page_sz, mmap_len;
@@ -864,13 +867,15 @@ rte_fbarray_attach(struct rte_fbarray *arr)
mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
- /* check the tailq - maybe user has already mapped this address space */
- rte_spinlock_lock(&mem_area_lock);
+ if (!internal_conf->pri_and_sec) {
+ /* check the tailq - maybe user has already mapped this address space */
+ rte_spinlock_lock(&mem_area_lock);
- TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
- if (overlap(tmp, arr->data, mmap_len)) {
- rte_errno = EEXIST;
- goto fail;
+ TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
+ if (overlap(tmp, arr->data, mmap_len)) {
+ rte_errno = EEXIST;
+ goto fail;
+ }
}
}
@@ -880,7 +885,7 @@ rte_fbarray_attach(struct rte_fbarray *arr)
if (data == NULL)
goto fail;
- eal_get_fbarray_path(path, sizeof(path), arr->name);
+ eal_sec_get_fbarray_path(path, sizeof(path), arr->name, runtime_dir);
fd = eal_file_open(path, EAL_OPEN_READWRITE);
if (fd < 0) {
@@ -894,16 +899,30 @@ rte_fbarray_attach(struct rte_fbarray *arr)
if (resize_and_map(fd, path, data, mmap_len))
goto fail;
- /* store our new memory area */
- ma->addr = data;
- ma->fd = fd; /* keep fd until detach/destroy */
- ma->len = mmap_len;
+ if (internal_conf->pri_and_sec) {
+ if (flock(fd, LOCK_UN)) {
+ rte_errno = errno;
+ goto fail;
+ }
+ close(fd);
+ fd = -1;
+ }
- TAILQ_INSERT_TAIL(&mem_area_tailq, ma, next);
+ if (!internal_conf->pri_and_sec) {
+ /* store our new memory area */
+ ma->addr = data;
+ ma->fd = fd; /* keep fd until detach/destroy */
+ ma->len = mmap_len;
- /* we're done */
+ TAILQ_INSERT_TAIL(&mem_area_tailq, ma, next);
- rte_spinlock_unlock(&mem_area_lock);
+ /* we're done */
+
+ rte_spinlock_unlock(&mem_area_lock);
+ } else {
+ /* pri_and_sec don't use mem_area_tailq */
+ free(ma);
+ }
return 0;
fail:
if (data)
@@ -915,6 +934,31 @@ rte_fbarray_attach(struct rte_fbarray *arr)
return -1;
}
+int
+rte_fbarray_attach(struct rte_fbarray *arr)
+{
+ const struct internal_config *internal_conf = eal_get_internal_configuration();
+ return __rte_fbarray_attach(arr, rte_eal_get_runtime_dir(), internal_conf);
+}
+
+int
+rte_sec_fbarray_attach(struct rte_fbarray *arr,
+ const int switch_pri_and_sec, const int sec_idx)
+{
+ struct internal_config *internal_conf = NULL;
+ char *runtime_dir = NULL;
+
+ if (!switch_pri_and_sec) {
+ runtime_dir = rte_eal_get_runtime_dir();
+ internal_conf = eal_get_internal_configuration();
+ } else {
+ runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+ internal_conf = rte_eal_sec_get_internal_config(sec_idx);
+ }
+
+ return __rte_fbarray_attach(arr, runtime_dir, internal_conf);
+}
+
int
rte_fbarray_detach(struct rte_fbarray *arr)
{
@@ -1054,6 +1098,45 @@ rte_fbarray_destroy(struct rte_fbarray *arr)
return ret;
}
+int
+rte_sec_fbarray_destroy(struct rte_fbarray *arr,
+ const int sec_idx)
+{
+ int fd;
+ char path[PATH_MAX];
+
+ if (arr == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ size_t page_sz = rte_mem_page_size();
+ if (page_sz == (size_t)-1)
+ return -1;
+
+ size_t mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
+ rte_mem_unmap(arr->data, mmap_len);
+
+ /* try deleting the file */
+ eal_sec_get_fbarray_path(path, sizeof(path), arr->name, rte_eal_sec_get_runtime_dir(sec_idx));
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(WARNING, EAL, "Could not open %s: %s, and just skip it\n", path, strerror(errno));
+ return 0;
+ }
+ if (flock(fd, LOCK_EX | LOCK_NB)) {
+ RTE_LOG(DEBUG, EAL, "Cannot destroy fbarray - another process is using it\n");
+ rte_errno = EBUSY;
+ } else {
+ unlink(path);
+ memset(arr, 0, sizeof(*arr));
+ }
+ close(fd);
+
+ return 0;
+}
+
void *
rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
{
diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index d9433db..48d72f6 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -315,9 +315,9 @@ virt2memseg(const void *addr, const struct rte_memseg_list *msl)
}
static struct rte_memseg_list *
-virt2memseg_list(const void *addr)
+virt2memseg_list(const void *addr, const struct rte_config *rte_cfg)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
struct rte_memseg_list *msl;
int msl_idx;
@@ -339,7 +339,13 @@ virt2memseg_list(const void *addr)
struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
- return virt2memseg_list(addr);
+ return virt2memseg_list(addr, rte_eal_get_configuration());
+}
+
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg)
+{
+ return virt2memseg_list(addr, rte_cfg);
}
struct virtiova {
@@ -394,11 +400,25 @@ rte_mem_iova2virt(rte_iova_t iova)
return vi.virt;
}
+static struct rte_memseg *
+__rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg)
+{
+ return virt2memseg(addr, msl != NULL ? msl :
+ rte_sec_mem_virt2memseg_list(addr, rte_cfg));
+}
+
struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
- return virt2memseg(addr, msl != NULL ? msl :
- rte_mem_virt2memseg_list(addr));
+ return __rte_mem_virt2memseg(addr, msl, rte_eal_get_configuration());
+}
+
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg)
+{
+ return __rte_mem_virt2memseg(addr, msl, rte_cfg);
}
static int
@@ -1077,24 +1097,27 @@ rte_eal_memory_detach(void)
}
/* init memory subsystem */
-int
-rte_eal_memory_init(void)
+static int
+__rte_eal_memory_init(__attribute__((__unused__)) const char *runtime_dir,
+ const struct internal_config *internal_conf,
+ struct rte_config *rte_cfg,
+ const int switch_pri_and_sec,
+ const int sec_idx)
{
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
int retval;
RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");
- if (rte_eal_memseg_init() < 0)
+ if (rte_eal_memseg_init(switch_pri_and_sec, sec_idx) < 0)
goto fail;
- if (eal_memalloc_init() < 0)
- goto fail;
+ if (!internal_conf->pri_and_sec)
+ if (eal_memalloc_init() < 0)
+ goto fail;
- retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ retval = rte_cfg->process_type == RTE_PROC_PRIMARY ?
rte_eal_hugepage_init() :
- rte_eal_hugepage_attach();
+ rte_eal_hugepage_attach(switch_pri_and_sec, sec_idx);
if (retval < 0)
goto fail;
@@ -1106,6 +1129,37 @@ rte_eal_memory_init(void)
return -1;
}
+int
+rte_eal_memory_init(void)
+{
+ const int unused_idx = -1;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ return __rte_eal_memory_init(rte_eal_get_runtime_dir(),
+ internal_conf, rte_eal_get_configuration(),
+ false, unused_idx);
+}
+
+int
+rte_eal_sec_memory_init(const int sec_idx)
+{
+ int ret;
+ struct rte_config *rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+
+ ret = __rte_eal_memory_init(rte_eal_sec_get_runtime_dir(sec_idx),
+ rte_eal_sec_get_internal_config(sec_idx), rte_cfg,
+ true, sec_idx);
+
+ return ret;
+}
+
+int
+rte_eal_sec_memory_cleanup(const int sec_idx)
+{
+ return eal_sec_memalloc_destroy(sec_idx);
+}
+
#ifndef RTE_EXEC_ENV_WINDOWS
#define EAL_MEMZONE_LIST_REQ "/eal/memzone_list"
#define EAL_MEMZONE_INFO_REQ "/eal/memzone_info"
diff --git a/lib/eal/common/eal_common_memzone.c b/lib/eal/common/eal_common_memzone.c
index 1f3e701..5253e47 100644
--- a/lib/eal/common/eal_common_memzone.c
+++ b/lib/eal/common/eal_common_memzone.c
@@ -23,7 +23,7 @@
#include "eal_memcfg.h"
/* Default count used until rte_memzone_max_set() is called */
-#define DEFAULT_MAX_MEMZONE_COUNT 2560
+#define DEFAULT_MAX_MEMZONE_COUNT 65535
int
rte_memzone_max_set(size_t max)
diff --git a/lib/eal/common/eal_common_options.c b/lib/eal/common/eal_common_options.c
index a6d21f1..80ec478 100644
--- a/lib/eal/common/eal_common_options.c
+++ b/lib/eal/common/eal_common_options.c
@@ -105,6 +105,7 @@ eal_long_options[] = {
{OPT_NO_TELEMETRY, 0, NULL, OPT_NO_TELEMETRY_NUM },
{OPT_FORCE_MAX_SIMD_BITWIDTH, 1, NULL, OPT_FORCE_MAX_SIMD_BITWIDTH_NUM},
{OPT_HUGE_WORKER_STACK, 2, NULL, OPT_HUGE_WORKER_STACK_NUM },
+ {OPT_MAP_PERFECT, 0, NULL, OPT_MAP_PERFECT_NUM },
{0, 0, NULL, 0 }
};
@@ -301,6 +302,17 @@ eal_get_hugefile_prefix(void)
return HUGEFILE_PREFIX_DEFAULT;
}
+const char *
+eal_sec_get_hugefile_prefix(const int sec_idx)
+{
+ struct internal_config *internal_conf =
+ rte_eal_sec_get_internal_config(sec_idx);
+
+ if (internal_conf->hugefile_prefix != NULL)
+ return internal_conf->hugefile_prefix;
+ return HUGEFILE_PREFIX_DEFAULT;
+}
+
void
eal_reset_internal_config(struct internal_config *internal_cfg)
{
@@ -1498,12 +1510,10 @@ eal_parse_simd_bitwidth(const char *arg)
}
static int
-eal_parse_base_virtaddr(const char *arg)
+eal_parse_base_virtaddr(const char *arg, struct internal_config *conf)
{
char *end;
uint64_t addr;
- struct internal_config *internal_conf =
- eal_get_internal_configuration();
errno = 0;
addr = strtoull(arg, &end, 16);
@@ -1523,7 +1533,7 @@ eal_parse_base_virtaddr(const char *arg)
* it can align to 2MB for x86. So this alignment can also be used
* on x86 and other architectures.
*/
- internal_conf->base_virtaddr =
+ conf->base_virtaddr =
RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);
return 0;
@@ -1904,7 +1914,7 @@ eal_parse_common_option(int opt, const char *optarg,
}
break;
case OPT_BASE_VIRTADDR_NUM:
- if (eal_parse_base_virtaddr(optarg) < 0) {
+ if (eal_parse_base_virtaddr(optarg, conf) < 0) {
RTE_LOG(ERR, EAL, "invalid parameter for --"
OPT_BASE_VIRTADDR "\n");
return -1;
@@ -1959,9 +1969,9 @@ eal_auto_detect_cores(struct rte_config *cfg)
}
static void
-compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
+compute_ctrl_threads_cpuset(struct internal_config *internal_conf)
{
- rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset;
+ rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
rte_cpuset_t default_set;
unsigned int lcore_id;
@@ -2027,13 +2037,36 @@ eal_adjust_config(struct internal_config *internal_cfg)
}
int
-eal_check_common_options(struct internal_config *internal_cfg)
+eal_sec_adjust_config(struct internal_config *internal_conf)
+{
+ struct internal_config *internal_conf_head;
+ internal_conf->process_type = RTE_PROC_SECONDARY;
+
+ internal_conf_head = rte_eal_sec_get_internal_config(0);
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ if (!internal_conf_head[i].pri_and_sec)
+ continue;
+ if (internal_conf == &internal_conf_head[i])
+ continue;
+ if (!strcmp(internal_conf_head[i].hugefile_prefix, internal_conf->hugefile_prefix))
+ return -EALREADY;
+ }
+
+ for (int i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ internal_conf->memory += internal_conf->socket_mem[i];
+
+ return 0;
+}
+
+int
+eal_check_common_options(struct internal_config *internal_cfg,
+ struct rte_config *cfg)
{
- struct rte_config *cfg = rte_eal_get_configuration();
const struct internal_config *internal_conf =
eal_get_internal_configuration();
- if (cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) {
+ if (!internal_cfg->pri_and_sec &&
+ cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) {
RTE_LOG(ERR, EAL, "Main lcore is not enabled for DPDK\n");
return -1;
}
@@ -2124,6 +2157,14 @@ eal_check_common_options(struct internal_config *internal_cfg)
"-m or --"OPT_SOCKET_MEM"\n");
}
+ if (internal_cfg->map_perfect || internal_cfg->pri_and_sec) {
+ if (!internal_cfg->legacy_mem || internal_cfg->in_memory || internal_cfg->no_hugetlbfs) {
+ RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" or "OPT_IN_MEMORY" or "OPT_NO_HUGE" "
+ "is not compatible with --"OPT_MAP_PERFECT" and "OPT_PRI_AND_SEC"\n");
+ return -1;
+ }
+ }
+
return 0;
}
diff --git a/lib/eal/common/eal_filesystem.h b/lib/eal/common/eal_filesystem.h
index 5d21f07..bf3b0a4 100644
--- a/lib/eal/common/eal_filesystem.h
+++ b/lib/eal/common/eal_filesystem.h
@@ -23,7 +23,7 @@
/* sets up platform-specific runtime data dir */
int
-eal_create_runtime_dir(void);
+eal_create_runtime_dir(const int sec_idx);
int
eal_clean_runtime_dir(void);
@@ -32,17 +32,32 @@ eal_clean_runtime_dir(void);
const char *
eal_get_hugefile_prefix(void);
+const char *
+eal_sec_get_hugefile_prefix(const int sec_idx);
+
#define RUNTIME_CONFIG_FNAME "config"
static inline const char *
-eal_runtime_config_path(void)
+__eal_runtime_config_path(const char *runtime_dir)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(),
+ snprintf(buffer, sizeof(buffer), "%s/%s", runtime_dir,
RUNTIME_CONFIG_FNAME);
return buffer;
}
+static inline const char *
+eal_runtime_config_path(void)
+{
+ return __eal_runtime_config_path(rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_runtime_config_path(const char *runtime_dir)
+{
+ return __eal_runtime_config_path(runtime_dir);
+}
+
/** Path of primary/secondary communication unix socket file. */
#define MP_SOCKET_FNAME "mp_socket"
static inline const char *
@@ -57,12 +72,29 @@ eal_mp_socket_path(void)
#define FBARRAY_NAME_FMT "%s/fbarray_%s"
static inline const char *
-eal_get_fbarray_path(char *buffer, size_t buflen, const char *name) {
- snprintf(buffer, buflen, FBARRAY_NAME_FMT, rte_eal_get_runtime_dir(),
+__eal_get_fbarray_path(char *buffer, size_t buflen, const char *name,
+ const char *runtime_dir)
+{
+ snprintf(buffer, buflen, FBARRAY_NAME_FMT, runtime_dir,
name);
return buffer;
}
+static inline const char *
+eal_get_fbarray_path(char *buffer, size_t buflen, const char *name)
+{
+ return __eal_get_fbarray_path(buffer, buflen, name,
+ rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_get_fbarray_path(char *buffer, size_t buflen,
+ const char *name, const char *runtime_dir)
+{
+ return __eal_get_fbarray_path(buffer, buflen, name,
+ runtime_dir);
+}
+
/** Path of hugepage info file. */
#define HUGEPAGE_INFO_FNAME "hugepage_info"
static inline const char *
@@ -78,15 +110,27 @@ eal_hugepage_info_path(void)
/** Path of hugepage data file. */
#define HUGEPAGE_DATA_FNAME "hugepage_data"
static inline const char *
-eal_hugepage_data_path(void)
+__eal_hugepage_data_path(const char *runtime_dir)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(),
+ snprintf(buffer, sizeof(buffer), "%s/%s", runtime_dir,
HUGEPAGE_DATA_FNAME);
return buffer;
}
+static inline const char *
+eal_hugepage_data_path(void)
+{
+ return __eal_hugepage_data_path(rte_eal_get_runtime_dir());
+}
+
+static inline const char *
+eal_sec_hugepage_data_path(const char *runtime_dir)
+{
+ return __eal_hugepage_data_path(runtime_dir);
+}
+
/** String format for hugepage map files. */
#define HUGEFILE_FMT "%s/%smap_%d"
static inline const char *
diff --git a/lib/eal/common/eal_internal_cfg.h b/lib/eal/common/eal_internal_cfg.h
index 167ec50..79802de 100644
--- a/lib/eal/common/eal_internal_cfg.h
+++ b/lib/eal/common/eal_internal_cfg.h
@@ -103,6 +103,8 @@ struct internal_config {
struct simd_bitwidth max_simd_bitwidth;
/**< max simd bitwidth path to use */
size_t huge_worker_stack_size; /**< worker thread stack size */
+ volatile unsigned pri_and_sec;
+ volatile unsigned map_perfect;
};
void eal_reset_internal_config(struct internal_config *internal_cfg);
diff --git a/lib/eal/common/eal_memalloc.h b/lib/eal/common/eal_memalloc.h
index 286ffb7..f55b48f 100644
--- a/lib/eal/common/eal_memalloc.h
+++ b/lib/eal/common/eal_memalloc.h
@@ -83,6 +83,10 @@ eal_memalloc_get_seg_fd(int list_idx, int seg_idx);
int
eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd);
+int
+eal_sec_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const int switch_pri_and_sec, const int sec_idx);
+
/* returns 0 or -errno */
int
eal_memalloc_set_seg_list_fd(int list_idx, int fd);
@@ -97,4 +101,7 @@ eal_memalloc_init(void)
int
eal_memalloc_cleanup(void);
+int
+eal_sec_memalloc_destroy(const int sec_idx);
+
#endif /* EAL_MEMALLOC_H */
diff --git a/lib/eal/common/eal_options.h b/lib/eal/common/eal_options.h
index 3cc9cb6..ef4de6b 100644
--- a/lib/eal/common/eal_options.h
+++ b/lib/eal/common/eal_options.h
@@ -89,6 +89,10 @@ enum {
OPT_FORCE_MAX_SIMD_BITWIDTH_NUM,
#define OPT_HUGE_WORKER_STACK "huge-worker-stack"
OPT_HUGE_WORKER_STACK_NUM,
+#define OPT_PRI_AND_SEC "pri-and-sec"
+ OPT_PRI_AND_SEC_NUM,
+#define OPT_MAP_PERFECT "map-perfect"
+ OPT_MAP_PERFECT_NUM,
OPT_LONG_MAX_NUM
};
@@ -99,9 +103,10 @@ extern const struct option eal_long_options[];
int eal_parse_common_option(int opt, const char *argv,
struct internal_config *conf);
int eal_option_device_parse(void);
-int eal_adjust_config(struct internal_config *internal_cfg);
-int eal_cleanup_config(struct internal_config *internal_cfg);
-int eal_check_common_options(struct internal_config *internal_cfg);
+int eal_adjust_config(struct internal_config *internal_conf);
+int eal_sec_adjust_config(struct internal_config *internal_conf);
+int eal_cleanup_config(struct internal_config *internal_conf);
+int eal_check_common_options(struct internal_config *internal_conf, struct rte_config *cfg);
void eal_common_usage(void);
enum rte_proc_type_t eal_proc_type_detect(void);
int eal_plugins_init(void);
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 4d2e806..68c1c63 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -103,7 +103,8 @@ int rte_eal_cpu_init(void);
* @return
* 0 on success, negative on error
*/
-int rte_eal_memseg_init(void);
+//int rte_eal_memseg_init(void);
+int rte_eal_memseg_init(const int switch_pri_and_sec, const int sec_idx);
/**
* Map memory
@@ -118,6 +119,9 @@ int rte_eal_memseg_init(void);
int rte_eal_memory_init(void)
__rte_shared_locks_required(rte_mcfg_mem_get_lock());
+int rte_eal_sec_memory_init(const int sec_idx);
+int rte_eal_sec_memory_cleanup(const int sec_idx);
+
/**
* Configure timers
*
@@ -414,7 +418,8 @@ int rte_eal_hugepage_init(void);
*
* This function is private to the EAL.
*/
-int rte_eal_hugepage_attach(void);
+//int rte_eal_hugepage_attach(void);
+int rte_eal_hugepage_attach(const int switch_pri_and_sec, const int sec_idx);
/**
* Detaches all memory mappings from a process.
@@ -687,6 +692,9 @@ eal_mem_free(void *virt, size_t size);
int
eal_mem_set_dump(void *virt, size_t size, bool dump);
+int
+eal_sec_set_runtime_dir(char *run_dir, const int sec_idx);
+
/**
* Sets the runtime directory of DPDK
*
@@ -696,7 +704,7 @@ eal_mem_set_dump(void *virt, size_t size, bool dump);
* 0 on success, (-1) on failure.
*/
int
-eal_set_runtime_dir(const char *run_dir);
+eal_set_runtime_dir(char *run_dir);
/**
* Get the internal configuration structure.
@@ -747,4 +755,19 @@ int eal_asprintf(char **buffer, const char *format, ...);
eal_asprintf(buffer, format, ##__VA_ARGS__)
#endif
+
+/****** APIs for libnet ******/
+#include <rte_memory.h>
+
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg);
+
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg);
+
+int
+rte_sec_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg,
+ struct rte_config *rte_cfg);
+
#endif /* _EAL_PRIVATE_H_ */
diff --git a/lib/eal/include/rte_eal.h b/lib/eal/include/rte_eal.h
index c2256f8..6f23f37 100644
--- a/lib/eal/include/rte_eal.h
+++ b/lib/eal/include/rte_eal.h
@@ -487,9 +487,17 @@ rte_eal_mbuf_user_pool_ops(void);
* @return
* The runtime directory path of DPDK
*/
-const char *
+char *
rte_eal_get_runtime_dir(void);
+/****** APIs for libnet ******/
+char *rte_eal_sec_get_runtime_dir(const int sec_idx);
+struct rte_config *rte_eal_sec_get_configuration(const int sec_idx);
+struct internal_config *rte_eal_sec_get_internal_config(const int sec_idx);
+
+int rte_eal_sec_attach(int argc, char **argv);
+int rte_eal_sec_detach(const char *file_prefix, int length);
+
/**
* Convert a string describing a mask of core ids into an array of core ids.
*
diff --git a/lib/eal/include/rte_fbarray.h b/lib/eal/include/rte_fbarray.h
index e330767..05e292e 100644
--- a/lib/eal/include/rte_fbarray.h
+++ b/lib/eal/include/rte_fbarray.h
@@ -98,6 +98,10 @@ rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
int
rte_fbarray_attach(struct rte_fbarray *arr);
+int
+rte_sec_fbarray_attach(struct rte_fbarray *arr,
+ const int switch_pri_and_sec, const int sec_idx);
+
/**
* Deallocate resources for an already allocated and correctly set up
@@ -119,6 +123,8 @@ rte_fbarray_attach(struct rte_fbarray *arr);
int
rte_fbarray_destroy(struct rte_fbarray *arr);
+int
+rte_sec_fbarray_destroy(struct rte_fbarray *arr, const int sec_idx);
/**
* Deallocate resources for an already allocated and correctly set up
diff --git a/lib/eal/include/rte_memory.h b/lib/eal/include/rte_memory.h
index 842362d..83aa7e5 100644
--- a/lib/eal/include/rte_memory.h
+++ b/lib/eal/include/rte_memory.h
@@ -145,7 +145,12 @@ rte_mem_iova2virt(rte_iova_t iova);
*/
struct rte_memseg *
rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);
-
+/*
+__rte_experimental
+struct rte_memseg *
+rte_sec_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl,
+ const struct rte_config *rte_cfg);
+*/
/**
* Get memseg list corresponding to virtual memory address.
*
@@ -156,7 +161,11 @@ rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);
*/
struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *virt);
-
+/*
+__rte_experimental
+struct rte_memseg_list *
+rte_sec_mem_virt2memseg_list(const void *addr, const struct rte_config *rte_cfg);
+*/
/**
* Memseg walk function prototype.
*
@@ -271,7 +280,12 @@ rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
*/
int
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg);
-
+/*
+__rte_experimental
+int
+rte_sec_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg,
+ struct rte_config *rte_cfg);
+*/
/**
* Walk each VA-contiguous area without performing any locking.
*
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index bc0ca2b..fd66fc4 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -260,21 +260,22 @@ rte_eal_config_create(void)
/* attach to an existing shared memory config */
static int
-rte_eal_config_attach(void)
+__rte_eal_config_attach(const int mmap_flags, int *mem_cfg_fd,
+ const char *runtime_dir,
+ const struct internal_config *internal_conf,
+ struct rte_config *rte_cfg)
{
- struct rte_config *config = rte_eal_get_configuration();
struct rte_mem_config *mem_config;
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ int mcfg_fd = *mem_cfg_fd;
- const char *pathname = eal_runtime_config_path();
+ const char *pathname = eal_sec_runtime_config_path(runtime_dir);
if (internal_conf->no_shconf)
return 0;
- if (mem_cfg_fd < 0){
- mem_cfg_fd = open(pathname, O_RDWR);
- if (mem_cfg_fd < 0) {
+ if (mcfg_fd < 0){
+ mcfg_fd = open(pathname, O_RDWR);
+ if (mcfg_fd < 0) {
RTE_LOG(ERR, EAL, "Cannot open '%s' for rte_mem_config\n",
pathname);
return -1;
@@ -283,20 +284,32 @@ rte_eal_config_attach(void)
/* map it as read-only first */
mem_config = (struct rte_mem_config *) mmap(NULL, sizeof(*mem_config),
- PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
+ mmap_flags, MAP_SHARED, mcfg_fd, 0);
if (mem_config == MAP_FAILED) {
- close(mem_cfg_fd);
- mem_cfg_fd = -1;
+ close(mcfg_fd);
+ mcfg_fd = -1;
RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config! error %i (%s)\n",
errno, strerror(errno));
return -1;
}
- config->mem_config = mem_config;
+ rte_cfg->mem_config = mem_config;
+ *mem_cfg_fd = mcfg_fd;
return 0;
}
+static int
+rte_eal_config_attach(void)
+{
+ const struct internal_config *internal_conf = eal_get_internal_configuration();
+
+ return __rte_eal_config_attach(PROT_READ, &mem_cfg_fd,
+ rte_eal_get_runtime_dir(), internal_conf,
+ rte_eal_get_configuration());
+}
+
+
/* reattach the shared config at exact memory location primary process has it */
static int
rte_eal_config_reattach(void)
@@ -413,6 +426,53 @@ rte_config_init(void)
return 0;
}
+static int
+rte_sec_config_init(const int sec_idx)
+{
+ int mem_cfg_fd = -1;
+ int mmap_flags = PROT_READ | PROT_WRITE;
+ int ret = -1;
+
+ struct rte_config *rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ struct internal_config *internal_conf = rte_eal_sec_get_internal_config(sec_idx);
+
+ rte_cfg->process_type = internal_conf->process_type;
+
+ ret = __rte_eal_config_attach(mmap_flags, &mem_cfg_fd,
+ rte_eal_sec_get_runtime_dir(sec_idx),
+ internal_conf, rte_cfg);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot attach shared memory\n");
+ return -1;
+ }
+
+
+ close(mem_cfg_fd);
+ return 0;
+}
+
+static int
+eal_sec_config_cleanup(const int sec_idx)
+{
+ int ret;
+ struct rte_config *lc_rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ struct internal_config *lc_internal_cfg = rte_eal_sec_get_internal_config(sec_idx);
+ char *lc_runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+
+ ret = munmap(lc_rte_cfg->mem_config, sizeof(*lc_rte_cfg->mem_config));
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Failed to unmap config memory!\n");
+ return -1;
+ }
+
+ memset(lc_rte_cfg, 0, sizeof(*lc_rte_cfg));
+ eal_cleanup_config(lc_internal_cfg);
+ memset(lc_internal_cfg, 0, sizeof(*lc_internal_cfg));
+ memset(lc_runtime_dir, 0, PATH_MAX);
+
+ return 0;
+}
+
/* Unlocks hugepage directories that were locked by eal_hugepage_info_init */
static void
eal_hugedirs_unlock(void)
@@ -453,6 +513,7 @@ eal_usage(const char *prgname)
" --"OPT_LEGACY_MEM" Legacy memory mode (no dynamic allocation, contiguous segments)\n"
" --"OPT_SINGLE_FILE_SEGMENTS" Put all hugepage memory in single files\n"
" --"OPT_MATCH_ALLOCATIONS" Free hugepages exactly as allocated\n"
+ " --"OPT_MAP_PERFECT" Map virtual addresses according to configured hugepage size\n"
" --"OPT_HUGE_WORKER_STACK"[=size]\n"
" Allocate worker thread stacks from hugepage memory.\n"
" Size is in units of kbytes and defaults to system\n"
@@ -624,7 +685,9 @@ eal_parse_huge_worker_stack(const char *arg)
/* Parse the argument given in the command line of the application */
static int
-eal_parse_args(int argc, char **argv)
+__eal_parse_args(int argc, char **argv, const int sec_idx,
+ struct internal_config *internal_conf,
+ struct rte_config *rte_cfg)
{
int opt, ret;
char **argvopt;
@@ -633,8 +696,6 @@ eal_parse_args(int argc, char **argv)
const int old_optind = optind;
const int old_optopt = optopt;
char * const old_optarg = optarg;
- struct internal_config *internal_conf =
- eal_get_internal_configuration();
argvopt = argv;
optind = 1;
@@ -758,6 +819,9 @@ eal_parse_args(int argc, char **argv)
case OPT_MATCH_ALLOCATIONS_NUM:
internal_conf->match_allocations = 1;
break;
+ case OPT_MAP_PERFECT_NUM:
+ internal_conf->map_perfect = 1;
+ break;
case OPT_HUGE_WORKER_STACK_NUM:
if (eal_parse_huge_worker_stack(optarg) < 0) {
@@ -789,7 +853,7 @@ eal_parse_args(int argc, char **argv)
}
/* create runtime data directory. In no_shconf mode, skip any errors */
- if (eal_create_runtime_dir() < 0) {
+ if (eal_create_runtime_dir(sec_idx) < 0) {
if (internal_conf->no_shconf == 0) {
RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
ret = -1;
@@ -798,20 +862,23 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(WARNING, EAL, "No DPDK runtime directory created\n");
}
- if (eal_adjust_config(internal_conf) != 0) {
- ret = -1;
- goto out;
+ if (!internal_conf->pri_and_sec) {
+ ret = eal_adjust_config(internal_conf);
+ if (ret != 0)
+ goto out;
+ } else {
+ ret = eal_sec_adjust_config(internal_conf);
+ if (ret != 0)
+ goto out;
}
/* sanity checks */
- if (eal_check_common_options(internal_conf) != 0) {
+ if (eal_check_common_options(internal_conf, rte_cfg) != 0) {
eal_usage(prgname);
ret = -1;
goto out;
}
- if (optind >= 0)
- argv[optind-1] = prgname;
ret = optind-1;
out:
@@ -823,6 +890,24 @@ eal_parse_args(int argc, char **argv)
return ret;
}
+static int
+eal_parse_args(int argc, char **argv)
+{
+ struct internal_config *internal_conf = eal_get_internal_configuration();
+
+ return __eal_parse_args(argc, argv, -1,
+ internal_conf,
+ rte_eal_get_configuration());
+}
+
+static int
+eal_sec_parse_args(int argc, char **argv, const int sec_idx)
+{
+ return __eal_parse_args(argc, argv, sec_idx,
+ rte_eal_sec_get_internal_config(sec_idx),
+ rte_eal_sec_get_configuration(sec_idx));
+}
+
static int
check_socket(const struct rte_memseg_list *msl, void *arg)
{
@@ -1455,3 +1540,108 @@ rte_eal_check_module(const char *module_name)
/* Module has been found */
return 1;
}
+
+
+/****** APIs for libnet ******/
+static unsigned int sec_count = 0;
+int
+rte_eal_sec_attach(int argc, char **argv)
+{
+ int ret;
+ int sec_idx = -1;
+ struct internal_config *lc_internal_conf = NULL;
+
+ if (sec_count >= RTE_MAX_SECONDARY) {
+ RTE_LOG(ERR, EAL, "Too many secondary processes: %u.\n", sec_count);
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ lc_internal_conf = rte_eal_sec_get_internal_config(i);
+ if (lc_internal_conf->pri_and_sec == 0) {
+ lc_internal_conf->pri_and_sec = 1;
+ sec_idx = i;
+ break;
+ }
+ }
+
+ eal_reset_internal_config(lc_internal_conf);
+
+ ret = eal_sec_parse_args(argc, argv, sec_idx);
+ if (ret < 0) {
+ if (ret == -EALREADY) {
+ RTE_LOG(ERR, EAL, "file_prefix %s already called initialization.\n",
+ lc_internal_conf->hugefile_prefix);
+ rte_errno = EALREADY;
+ } else {
+ RTE_LOG(ERR, EAL, "Invalid 'command line' arguments.\n");
+ rte_errno = EINVAL;
+ }
+ return -1;
+ }
+
+ ret = rte_sec_config_init(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot init sec config\n");
+ return -1;
+ }
+
+ ret = rte_eal_sec_memory_init(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot init memory\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ sec_count++;
+ return 0;
+}
+
+int
+rte_eal_sec_detach(const char *file_prefix, int length)
+{
+ int ret;
+ int sec_idx = -1;
+ struct internal_config *lc_internal_conf = NULL;
+
+ if (!file_prefix || length <= 0) {
+ RTE_LOG(ERR, EAL, "Invalid 'file_prefix or length' arguments.\n");
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ for (int i = 0; i < RTE_MAX_SECONDARY; ++i) {
+ lc_internal_conf = rte_eal_sec_get_internal_config(i);
+ if (lc_internal_conf->pri_and_sec == 0)
+ continue;
+ if (!strncmp(lc_internal_conf->hugefile_prefix, file_prefix, length)) {
+ sec_idx = i;
+ break;
+ }
+ }
+ if (sec_idx == -1) {
+ RTE_LOG(ERR, EAL, "Cannot find file_prefix %s.\n", file_prefix);
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ ret = rte_eal_sec_memory_cleanup(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot cleanup memory\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ ret = eal_sec_config_cleanup(sec_idx);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot cleanup hugepage sharefile.\n");
+ rte_errno = EACCES;
+ return -1;
+ }
+
+ if (sec_count) {
+ sec_count--;
+ }
+ return 0;
+}
diff --git a/lib/eal/linux/eal_hugepage_info.c b/lib/eal/linux/eal_hugepage_info.c
index 581d9df..7047842 100644
--- a/lib/eal/linux/eal_hugepage_info.c
+++ b/lib/eal/linux/eal_hugepage_info.c
@@ -451,7 +451,7 @@ calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent,
* This could be determined by mapping,
* but it is precisely what hugepage file reuse is trying to avoid.
*/
- if (!internal_conf->legacy_mem && reusable_pages == 0)
+ if ((!internal_conf->legacy_mem && reusable_pages == 0) || internal_conf->map_perfect)
for (i = 0; i < rte_socket_count(); i++) {
int socket = rte_socket_id_by_idx(i);
unsigned int num_pages =
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index 21c5729..e55a00f 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -31,6 +31,7 @@
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_memory.h>
+#include <rte_eal_paging.h>
#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
@@ -87,12 +88,14 @@ static int fallocate_supported = -1; /* unknown */
* they will be initialized at startup, and filled as we allocate/deallocate
* segments.
*/
-static struct {
+struct fd_list{
int *fds; /**< dynamically allocated array of segment lock fd's */
int memseg_list_fd; /**< memseg list fd */
int len; /**< total length of the array */
int count; /**< entries used in an array */
-} fd_list[RTE_MAX_MEMSEG_LISTS];
+};
+static struct fd_list fd_list[RTE_MAX_MEMSEG_LISTS];
+static struct fd_list sec_fd_list[RTE_MAX_SECONDARY][RTE_MAX_MEMSEG_LISTS];
/** local copy of a memory map, used to synchronize memory hotplug in MP */
static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
@@ -1489,7 +1492,7 @@ secondary_msl_destroy_walk(const struct rte_memseg_list *msl,
}
static int
-alloc_list(int list_idx, int len)
+__alloc_list(int list_idx, int len, struct fd_list *fd_ls)
{
int *data;
int i;
@@ -1497,7 +1500,7 @@ alloc_list(int list_idx, int len)
eal_get_internal_configuration();
/* single-file segments mode does not need fd list */
- if (!internal_conf->single_file_segments) {
+ if (!internal_conf->single_file_segments) { // sec todo
/* ensure we have space to store fd per each possible segment */
data = malloc(sizeof(int) * len);
if (data == NULL) {
@@ -1507,24 +1510,36 @@ alloc_list(int list_idx, int len)
/* set all fd's as invalid */
for (i = 0; i < len; i++)
data[i] = -1;
- fd_list[list_idx].fds = data;
- fd_list[list_idx].len = len;
+ fd_ls[list_idx].fds = data;
+ fd_ls[list_idx].len = len;
} else {
- fd_list[list_idx].fds = NULL;
- fd_list[list_idx].len = 0;
+ fd_ls[list_idx].fds = NULL;
+ fd_ls[list_idx].len = 0;
}
- fd_list[list_idx].count = 0;
- fd_list[list_idx].memseg_list_fd = -1;
+ fd_ls[list_idx].count = 0;
+ fd_ls[list_idx].memseg_list_fd = -1;
return 0;
}
+static int
+alloc_list(int list_idx, int len)
+{
+ return __alloc_list(list_idx, len, fd_list);
+}
+
+static int
+sec_alloc_list(int list_idx, int len, struct fd_list *fd_ls)
+{
+ return __alloc_list(list_idx, len, fd_ls);
+}
+
static int
destroy_list(int list_idx)
{
const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ eal_get_internal_configuration();
/* single-file segments mode does not need fd list */
if (!internal_conf->single_file_segments) {
@@ -1579,29 +1594,54 @@ fd_list_destroy_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
return destroy_list(msl_idx);
}
-int
-eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
+static int
+__eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const struct rte_config *rte_cfg, struct fd_list *fd_ls)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
+ const struct internal_config *internal_conf = eal_get_internal_configuration();
/* single file segments mode doesn't support individual segment fd's */
- if (internal_conf->single_file_segments)
+ if (internal_conf->single_file_segments) // sec todo
return -ENOTSUP;
/* if list is not allocated, allocate it */
- if (fd_list[list_idx].len == 0) {
+ if (fd_ls[list_idx].len == 0) {
int len = mcfg->memsegs[list_idx].memseg_arr.len;
- if (alloc_list(list_idx, len) < 0)
+ if (sec_alloc_list(list_idx, len, fd_ls) < 0)
return -ENOMEM;
}
- fd_list[list_idx].fds[seg_idx] = fd;
+ fd_ls[list_idx].fds[seg_idx] = fd;
return 0;
}
+int
+eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
+{
+ return __eal_memalloc_set_seg_fd(list_idx, seg_idx, fd,
+ rte_eal_get_configuration(), fd_list);
+}
+
+int
+eal_sec_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd,
+ const int switch_pri_and_sec, const int sec_idx)
+{
+ struct rte_config *rte_cfg = NULL;
+ struct fd_list *fd_ls = NULL;
+
+ if (!switch_pri_and_sec) {
+ rte_cfg = rte_eal_get_configuration();
+ fd_ls = &fd_list[0];
+ } else {
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ fd_ls = &sec_fd_list[sec_idx][0];
+ }
+
+ return __eal_memalloc_set_seg_fd(list_idx, seg_idx, fd, rte_cfg, fd_ls);
+}
+
int
eal_memalloc_set_seg_list_fd(int list_idx, int fd)
{
@@ -1785,3 +1825,52 @@ eal_memalloc_init(void)
return -1;
return 0;
}
+
+static int
+fd_sec_list_destroy_walk(const struct rte_memseg_list *msl, const int sec_idx)
+{
+ struct rte_mem_config *mcfg = rte_eal_sec_get_configuration(sec_idx)->mem_config;
+ struct fd_list *fd_ls = sec_fd_list[sec_idx];
+ int list_idx;
+
+ list_idx = msl - mcfg->memsegs;
+ if (fd_ls[list_idx].len != 0) {
+ free(fd_ls[list_idx].fds);
+ /* The fd has already been closed; see eal_legacy_hugepage_attach(). */
+ //close(fd_ls[list_idx].fds[seg_idx]);
+ }
+ memset(&fd_ls[list_idx], 0, sizeof(fd_ls[list_idx]));
+
+ return 0;
+}
+
+int
+eal_sec_memalloc_destroy(const int sec_idx)
+{
+ struct rte_mem_config *mcfg = rte_eal_sec_get_configuration(sec_idx)->mem_config;
+ int i, ret = 0;
+
+ if (mcfg == NULL) {
+ return 0;
+ }
+
+ for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+ struct rte_memseg_list *msl = &mcfg->memsegs[i];
+
+ if (msl->base_va == NULL)
+ continue;
+
+ if (fd_sec_list_destroy_walk(msl, sec_idx)) {
+ RTE_LOG(ERR, EAL, "Failed to clear secondary fd_list.\n");
+ return -1;
+ }
+
+ ret = rte_sec_fbarray_destroy(&msl->memseg_arr, sec_idx);
+ if (ret)
+ return ret;
+
+ rte_mem_unmap(msl->base_va, msl->len);
+ }
+
+ return 0;
+}
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 9b6f08f..e3d4fb4 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -994,6 +994,7 @@ static int
remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
{
int cur_page, seg_start_page, new_memseg, ret;
+ const struct internal_config *internal_conf = eal_get_internal_configuration();
seg_start_page = 0;
for (cur_page = 0; cur_page < n_pages; cur_page++) {
@@ -1019,10 +1020,10 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
* address to lower address. Here, physical addresses are in
* descending order.
*/
- else if ((prev->physaddr - cur->physaddr) != cur->size)
+ else if (!internal_conf->map_perfect && (prev->physaddr - cur->physaddr) != cur->size)
new_memseg = 1;
#else
- else if ((cur->physaddr - prev->physaddr) != cur->size)
+ else if (!internal_conf->map_perfect && (cur->physaddr - prev->physaddr) != cur->size)
new_memseg = 1;
#endif
@@ -1250,6 +1251,24 @@ eal_legacy_hugepage_init(void)
/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
used_hp[i].hugepage_sz = internal_conf->hugepage_info[i].hugepage_sz;
+ if (internal_conf->map_perfect) {
+ int sys_num_pages = 0;
+ int need_num_pages = 0;
+ struct rte_memseg_list *msl;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
+ sys_num_pages += internal_conf->hugepage_info[i].num_pages[j];
+ }
+
+ for (j = 0; j < RTE_MAX_MEMSEG_LISTS; j++) {
+ msl = &mcfg->memsegs[j];
+ if (internal_conf->hugepage_info[i].hugepage_sz == msl->page_sz)
+ need_num_pages += msl->memseg_arr.len;
+ }
+
+ internal_conf->hugepage_info[i].num_pages[0] = RTE_MIN(sys_num_pages, need_num_pages);
+ }
+
nr_hugepages += internal_conf->hugepage_info[i].num_pages[0];
}
@@ -1330,8 +1349,13 @@ eal_legacy_hugepage_init(void)
goto fail;
}
- qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
- sizeof(struct hugepage_file), cmp_physaddr);
+ /* continuous physical memory does not bring performance improvements,
+ * so no sorting is performed for quick startup.
+ */
+ if (!internal_conf->map_perfect) {
+ qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
+ sizeof(struct hugepage_file), cmp_physaddr);
+ }
/* we have processed a num of hugepages of this size, so inc offset */
hp_offset += hpi->num_pages[0];
@@ -1516,9 +1540,9 @@ getFileSize(int fd)
* in order to form a contiguous block in the virtual memory space
*/
static int
-eal_legacy_hugepage_attach(void)
+eal_legacy_hugepage_attach(const int switch_pri_and_sec, const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = NULL;
struct hugepage_file *hp = NULL;
unsigned int num_hp = 0;
unsigned int i = 0;
@@ -1526,6 +1550,22 @@ eal_legacy_hugepage_attach(void)
off_t size = 0;
int fd, fd_hugepage = -1;
+ struct rte_config *rte_cfg = NULL;
+ struct internal_config *internal_conf = NULL;
+ char *runtime_dir = NULL;
+
+ if (!switch_pri_and_sec) {
+ runtime_dir = rte_eal_get_runtime_dir();
+ rte_cfg = rte_eal_get_configuration();
+ internal_conf = eal_get_internal_configuration();
+ } else {
+ runtime_dir = rte_eal_sec_get_runtime_dir(sec_idx);
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ internal_conf = rte_eal_sec_get_internal_config(sec_idx);
+ }
+
+ mcfg = rte_cfg->mem_config;
+
if (aslr_enabled() > 0) {
RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
"(ASLR) is enabled in the kernel.\n");
@@ -1533,10 +1573,10 @@ eal_legacy_hugepage_attach(void)
"into secondary processes\n");
}
- fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
+ fd_hugepage = open(eal_sec_hugepage_data_path(runtime_dir), O_RDONLY);
if (fd_hugepage < 0) {
RTE_LOG(ERR, EAL, "Could not open %s\n",
- eal_hugepage_data_path());
+ eal_sec_hugepage_data_path(runtime_dir));
goto error;
}
@@ -1544,7 +1584,7 @@ eal_legacy_hugepage_attach(void)
hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
if (hp == MAP_FAILED) {
RTE_LOG(ERR, EAL, "Could not mmap %s\n",
- eal_hugepage_data_path());
+ eal_sec_hugepage_data_path(runtime_dir));
goto error;
}
@@ -1591,13 +1631,13 @@ eal_legacy_hugepage_attach(void)
}
/* find segment data */
- msl = rte_mem_virt2memseg_list(map_addr);
+ msl = rte_sec_mem_virt2memseg_list(map_addr, rte_cfg);
if (msl == NULL) {
RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg list\n",
__func__);
goto mmap_error;
}
- ms = rte_mem_virt2memseg(map_addr, msl);
+ ms = rte_sec_mem_virt2memseg(map_addr, msl, rte_cfg);
if (ms == NULL) {
RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg\n",
__func__);
@@ -1612,8 +1652,16 @@ eal_legacy_hugepage_attach(void)
goto mmap_error;
}
+ /* No hugefile lock is required in PRI_AND_SEC mode, close it
+ * to avoid opening too much fd.
+ */
+ if (internal_conf->pri_and_sec) {
+ close(fd);
+ fd = -1;
+ }
+
/* store segment fd internally */
- if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
+ if (eal_sec_memalloc_set_seg_fd(msl_idx, ms_idx, fd, switch_pri_and_sec, sec_idx) < 0)
RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
rte_strerror(rte_errno));
}
@@ -1662,13 +1710,17 @@ rte_eal_hugepage_init(void)
}
int
-rte_eal_hugepage_attach(void)
+rte_eal_hugepage_attach(const int switch_pri_and_sec, const int sec_idx)
{
- const struct internal_config *internal_conf =
- eal_get_internal_configuration();
+ struct internal_config *internal_conf;
+
+ if (!switch_pri_and_sec)
+ internal_conf = eal_get_internal_configuration();
+ else
+ internal_conf = rte_eal_sec_get_internal_config(sec_idx);
return internal_conf->legacy_mem ?
- eal_legacy_hugepage_attach() :
+ eal_legacy_hugepage_attach(switch_pri_and_sec, sec_idx) :
eal_hugepage_attach();
}
@@ -1886,9 +1938,10 @@ memseg_primary_init(void)
}
static int
-memseg_secondary_init(void)
+memseg_secondary_init(struct rte_config *rte_cfg,
+ const int switch_pri_and_sec, const int sec_idx)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_mem_config *mcfg = rte_cfg->mem_config;
int msl_idx = 0;
struct rte_memseg_list *msl;
@@ -1900,7 +1953,7 @@ memseg_secondary_init(void)
if (msl->memseg_arr.len == 0 || msl->external)
continue;
- if (rte_fbarray_attach(&msl->memseg_arr)) {
+ if (rte_sec_fbarray_attach(&msl->memseg_arr, switch_pri_and_sec, sec_idx)) {
RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
return -1;
}
@@ -1916,11 +1969,18 @@ memseg_secondary_init(void)
}
int
-rte_eal_memseg_init(void)
+rte_eal_memseg_init(const int switch_pri_and_sec, const int sec_idx)
{
/* increase rlimit to maximum */
struct rlimit lim;
+ struct rte_config *rte_cfg = NULL;
+ if (!switch_pri_and_sec) {
+ rte_cfg = rte_eal_get_configuration();
+ } else {
+ rte_cfg = rte_eal_sec_get_configuration(sec_idx);
+ }
+
#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
const struct internal_config *internal_conf =
eal_get_internal_configuration();
@@ -1948,11 +2008,11 @@ rte_eal_memseg_init(void)
}
#endif
- return rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ return rte_cfg->process_type == RTE_PROC_PRIMARY ?
#ifndef RTE_ARCH_64
memseg_primary_init_32() :
#else
memseg_primary_init() :
#endif
- memseg_secondary_init();
+ memseg_secondary_init(rte_cfg, switch_pri_and_sec, sec_idx);
}
diff --git a/lib/eal/unix/eal_filesystem.c b/lib/eal/unix/eal_filesystem.c
index afbab93..b0ec9dd 100644
--- a/lib/eal/unix/eal_filesystem.c
+++ b/lib/eal/unix/eal_filesystem.c
@@ -17,7 +17,7 @@
#include "eal_private.h"
#include "eal_filesystem.h"
-int eal_create_runtime_dir(void)
+int eal_create_runtime_dir(const int sec_idx)
{
const char *directory;
char run_dir[PATH_MAX];
@@ -46,8 +46,9 @@ int eal_create_runtime_dir(void)
}
/* create prefix-specific subdirectory under DPDK runtime dir */
- ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
- tmp, eal_get_hugefile_prefix());
+ const char *prefix = (sec_idx < 0) ? eal_get_hugefile_prefix() :
+ eal_sec_get_hugefile_prefix(sec_idx);
+ ret = snprintf(run_dir, sizeof(run_dir), "%s/%s", tmp, prefix);
if (ret < 0 || ret == sizeof(run_dir)) {
RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
return -1;
@@ -70,7 +71,9 @@ int eal_create_runtime_dir(void)
return -1;
}
- if (eal_set_runtime_dir(run_dir))
+ ret = (sec_idx < 0) ? eal_set_runtime_dir(run_dir) :
+ eal_sec_set_runtime_dir(run_dir, sec_idx);
+ if (ret)
return -1;
return 0;
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 5e0cd47..0bfcfa4 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -87,6 +87,8 @@ DPDK_24 {
rte_eal_remote_launch;
rte_eal_tailq_lookup;
rte_eal_tailq_register;
+ rte_eal_sec_attach;
+ rte_eal_sec_detach;
rte_eal_using_phys_addrs;
rte_eal_vfio_get_vf_token; # WINDOWS_NO_EXPORT
rte_eal_vfio_intr_mode; # WINDOWS_NO_EXPORT
diff --git a/lib/ring/rte_ring.h b/lib/ring/rte_ring.h
index c709f30..750f29e 100644
--- a/lib/ring/rte_ring.h
+++ b/lib/ring/rte_ring.h
@@ -815,6 +815,81 @@ rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
n, available);
}
+/****** APIs for libnet ******/
+static __rte_always_inline unsigned
+rte_ring_cn_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t old_head = r->prod.tail;
+ rte_smp_rmb();
+
+ const uint32_t entries = r->cons.head - old_head;
+ if (n > entries) {
+ n = entries;
+ }
+ if (unlikely(n == 0)) {
+ return 0;
+ }
+
+ r->prod.head = old_head + n;
+ rte_smp_rmb();
+
+ __rte_ring_dequeue_elems(r, old_head, obj_table, sizeof(void *), n);
+ return n;
+}
+
+static __rte_always_inline void
+rte_ring_cn_enqueue(struct rte_ring *r)
+{
+ rte_smp_wmb();
+ r->prod.tail = r->prod.head;
+}
+
+static __rte_always_inline unsigned
+rte_ring_en_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t old_tail = r->cons.tail;
+ rte_smp_rmb();
+
+ const uint32_t entries = r->prod.tail - old_tail;
+ if (n > entries) {
+ n = entries;
+ }
+ if (unlikely(n == 0)) {
+ return 0;
+ }
+
+ const uint32_t new_tail = old_tail + n;
+ rte_smp_rmb();
+
+ __rte_ring_dequeue_elems(r, old_tail, obj_table, sizeof(void *), n);
+ rte_smp_rmb();
+
+ r->cons.tail = new_tail;
+ return n;
+}
+
+static __rte_always_inline unsigned
+rte_ring_en_enqueue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
+{
+ const uint32_t capacity = r->capacity;
+ const uint32_t old_head = r->cons.head;
+ rte_smp_rmb();
+
+ const uint32_t entries = capacity + r->cons.tail - old_head;
+ if (n > entries) {
+ return 0;
+ }
+
+ const uint32_t new_head = old_head + n;
+ rte_smp_rmb();
+
+ __rte_ring_enqueue_elems(r, old_head, obj_table, sizeof(void *), n);
+ rte_smp_wmb();
+
+ r->cons.head = new_head;
+ return n;
+}
+
#ifdef __cplusplus
}
#endif
--
2.33.0
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/mengxuanzhang/dpdk.git
git@gitee.com:mengxuanzhang/dpdk.git
mengxuanzhang
dpdk
dpdk
master

搜索帮助