代码拉取完成,页面将自动刷新
同步操作将从 src-openEuler/gazelle 强制同步,此操作会覆盖自 Fork 仓库以来所做的任何修改,且无法恢复!!!
确定后同步将在后台操作,完成时将刷新页面,请耐心等待。
From 1c771f0af29dad27d29af371b13ab3013efc44c3 Mon Sep 17 00:00:00 2001
From: Lemmy Huang <huangliming5@huawei.com>
Date: Tue, 20 Jun 2023 17:30:34 +0800
Subject: [PATCH] lstack: cfg add app_exclude_cpus
Signed-off-by: Lemmy Huang <huangliming5@huawei.com>
---
src/lstack/core/lstack_cfg.c | 55 ++++++++++++++++---
src/lstack/include/lstack_cfg.h | 2 +
2 files changed, 49 insertions(+), 8 deletions(-)
diff --git a/src/lstack/core/lstack_cfg.c b/src/lstack/core/lstack_cfg.c
index daf49ea..8a627d5 100644
--- a/src/lstack/core/lstack_cfg.c
+++ b/src/lstack/core/lstack_cfg.c
@@ -47,6 +47,8 @@ static config_t g_config;
static int32_t parse_host_addr(void);
static int32_t parse_low_power_mode(void);
static int32_t parse_stack_cpu_number(void);
+static int32_t parse_app_bind_numa(void);
+static int32_t parse_app_exclude_cpus(void);
static int32_t parse_use_ltran(void);
static int32_t parse_mask_addr(void);
static int32_t parse_devices(void);
@@ -54,7 +56,6 @@ static int32_t parse_dpdk_args(void);
static int32_t parse_gateway_addr(void);
static int32_t parse_kni_switch(void);
static int32_t parse_listen_shadow(void);
-static int32_t parse_app_bind_numa(void);
static int32_t parse_main_thread_affinity(void);
static int32_t parse_unix_prefix(void);
static int32_t parse_read_connect_number(void);
@@ -111,6 +112,7 @@ static struct config_vector_t g_config_tbl[] = {
{ "kni_switch", parse_kni_switch },
{ "listen_shadow", parse_listen_shadow },
{ "app_bind_numa", parse_app_bind_numa },
+ { "app_exclude_cpus", parse_app_exclude_cpus },
{ "main_thread_affinity", parse_main_thread_affinity },
{ "unix_prefix", parse_unix_prefix },
{ "tcp_conn_count", parse_tcp_conn_count },
@@ -391,6 +393,50 @@ static int32_t parse_stack_cpu_number(void)
     return 0;
 }
 
+static int32_t parse_app_bind_numa(void)
+{
+    int32_t ret;
+    PARSE_ARG(g_config_params.app_bind_numa, "app_bind_numa", 1, 0, 1, ret);
+    return ret;
+}
+
+/* Parse the optional "app_exclude_cpus" list. Only honored when
+ * app_bind_numa is enabled; the excluded CPUs are later re-added to the
+ * stack cpuset in init_stack_numa_cpuset() so app threads avoid them.
+ * Returns 0 on success (or when the option is absent), negative errno on
+ * malformed input or allocation failure. */
+static int32_t parse_app_exclude_cpus(void)
+{
+    const config_setting_t *num_cpus = NULL;
+    const char *args = NULL;
+    char *tmp_arg;
+    int32_t cnt;
+
+    g_config_params.app_exclude_num_cpu = 0;
+    if (!g_config_params.app_bind_numa) {
+        return 0;
+    }
+
+    num_cpus = config_lookup(&g_config, "app_exclude_cpus");
+    if (num_cpus == NULL) {
+        return 0;
+    }
+
+    args = config_setting_get_string(num_cpus);
+    if (args == NULL) {
+        return -EINVAL;
+    }
+
+    /* separate_str_to_array() modifies the string in place, so work on a
+     * private copy; strdup can fail under memory pressure. */
+    tmp_arg = strdup(args);
+    if (tmp_arg == NULL) {
+        return -ENOMEM;
+    }
+    cnt = separate_str_to_array(tmp_arg, g_config_params.app_exclude_cpus, CFG_MAX_CPUS, CFG_MAX_CPUS);
+    free(tmp_arg);
+    if (cnt <= 0 || cnt > CFG_MAX_CPUS) {
+        return -EINVAL;
+    }
+
+    g_config_params.app_exclude_num_cpu = cnt;
+    return 0;
+}
+
+
static int32_t numa_to_cpusnum(unsigned socket_id, uint32_t *cpulist, int32_t num)
{
char path[PATH_MAX] = {0};
@@ -458,6 +500,10 @@ int32_t init_stack_numa_cpuset(struct protocol_stack *stack)
}
}
+ for (int32_t idx = 0; idx < cfg->app_exclude_num_cpu; ++idx) {
+ CPU_SET(cfg->app_exclude_cpus[idx], &stack_cpuset);
+ }
+
ret = stack_idle_cpuset(stack, &stack_cpuset);
if (ret < 0) {
LSTACK_LOG(ERR, LSTACK, "thread_get_cpuset stack(%u) failed\n", stack->tid);
@@ -836,13 +882,6 @@ static int32_t parse_listen_shadow(void)
return ret;
}
-static int32_t parse_app_bind_numa(void)
-{
- int32_t ret;
- PARSE_ARG(g_config_params.app_bind_numa, "app_bind_numa", 1, 0, 1, ret);
- return ret;
-}
-
static int32_t parse_main_thread_affinity(void)
{
int32_t ret;
diff --git a/src/lstack/include/lstack_cfg.h b/src/lstack/include/lstack_cfg.h
index b000519..6da18cf 100644
--- a/src/lstack/include/lstack_cfg.h
+++ b/src/lstack/include/lstack_cfg.h
@@ -68,6 +68,8 @@ struct cfg_params {
uint32_t cpus[CFG_MAX_CPUS];
uint32_t send_cpus[CFG_MAX_CPUS];
uint32_t recv_cpus[CFG_MAX_CPUS];
+ uint16_t app_exclude_num_cpu;
+ uint32_t app_exclude_cpus[CFG_MAX_CPUS];
uint8_t num_ports;
uint16_t ports[CFG_MAX_PORTS];
char log_file[PATH_MAX];
--
2.33.0
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。