From ad698442a184fe85b40f4c631515437092c8db7c Mon Sep 17 00:00:00 2001
From: small_leek <xiasenlin1@huawei.com>
Date: Wed, 22 Dec 2021 16:00:41 +0800
Subject: [PATCH] Add queryconfig.py and route configuration queries through QueryConfig.get_value

---
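The new QueryConfig helper reads /usr/li-wen/wsdm.ini (WSDM_INI_PATH). Below is a minimal
sketch of the sections and keys that this patch queries; all values are illustrative
placeholders. The per-level worker sections (looked up through self.worker_conf with the
keys instances, max_limit, vcpus, ram and jobs) are omitted because their section names
do not appear in this diff.

    [Monitor]
    interval_for_check_schedule = 600
    interval_for_cycle_check_new_worker = 60
    num_for_check_reserved_worker = 3
    interval_for_check_reserved_worker = 300
    projects = project_a project_b
    repo_server = backend_host

    [AES_Decry_Conf]
    key = <aes-key-placeholder>
    worker_login = <path-to-encrypted-worker-login-file>
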
 libs/conf/queryconfig.py | 76 ++++++++++++++++++++++++++++++++++++++++
 main/common/Constant.py  | 15 ++++++--
 main/core/auto_extend.py | 31 ++++++++--------
 main/wsdm.py             | 57 +++++++++++-------------------
 4 files changed, 123 insertions(+), 56 deletions(-)
 create mode 100644 libs/conf/queryconfig.py

diff --git a/libs/conf/queryconfig.py b/libs/conf/queryconfig.py
new file mode 100644
index 0000000..f06493b
--- /dev/null
+++ b/libs/conf/queryconfig.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+# -*- encoding=utf-8 -*-
+"""
+# **********************************************************************************
+# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
+# [openeuler-jenkins] is licensed under the Mulan PSL v1.
+# You can use this software according to the terms and conditions of the Mulan PSL v1.
+# You may obtain a copy of Mulan PSL v1 at:
+#     http://license.coscl.org.cn/MulanPSL
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
+# PURPOSE.
+# See the Mulan PSL v1 for more details.
+# Author: wangge
+# Create: 2021-10-29
+# **********************************************************************************
+"""
+import os
+import configparser
+from libs.log.logger import log_check
+from main.common.Constant import WSDM_INI_PATH
+
+
+class QueryConfig(object):
+    """
+    query config file
+    """
+    def __init__(self, path):
+        """
+        @description : initialize the class
+        -----------
+        @param :
+            path: path of configuration file
+        -----------
+        @returns :
+        -----------
+        """
+        self.conf_path = path
+
+
+    def get_value(self, section, key):
+        """
+        @description : get value of (section, key)
+        -----------
+        @param :
+            section: section name in the configuration file
+            key: option name within the section
+        -----------
+        @returns : value of (section, key), or None on failure
+        -----------
+        """
+
+        if not os.path.exists(self.conf_path):
+            log_check.error(f"config file is not exist")
+            return None
+
+        config = configparser.ConfigParser()
+        config.read(self.conf_path)
+        section_list = config.sections()
+
+        try:
+            item_value = config.get(section, key)
+            return item_value
+        except AttributeError as err:
+            log_check.error(f"reason: {err}")
+            return None
+        except configparser.NoSectionError:
+            log_check.error(f"appointed section is not exist, valid section listed as below: {section_list}")
+            return None
+        except configparser.NoOptionError:
+            options = config.options(section)
+            log_check.error(f"section does not contain the specified key, valid keys are: {options}")
+            return None
+
+query_config = QueryConfig(WSDM_INI_PATH)
+
diff --git a/main/common/Constant.py b/main/common/Constant.py
index 4464967..7f4a44d 100644
--- a/main/common/Constant.py
+++ b/main/common/Constant.py
@@ -16,15 +16,24 @@
 # ********************************************************************
 """
 
-#directory path constant
+# path of wsdm.ini
+WSDM_INI_PATH = '/usr/li-wen/wsdm.ini'
+
+# directory path constant
 HISTORY_LOG_PATH = '/var/tmp/li-wen/getbuildtime_history.log'
 
-#value constant
+# value constant
 BUILD_TIME_ITEM_LEN = 17
 BUILD_TIME_END_INDEX = 8
 BUILD_TIME_END_POS = 9
 BUILD_TIME_START_INDEX = 7
 BUILD_TIME_START_POS = 11
 
-#file name constant
+# file name constant
 GETBUILDTIME_CHECK_SHELL = 'getbuildtime_check_param'
+
+# multi architecture
+MULTI_ARCH = ["aarch64", "x86"]
+
+# excluded workers
+EXCLUDED_WORKERS = '/usr/li-wen/libs/conf/excluded_workers.yaml'
diff --git a/main/core/auto_extend.py b/main/core/auto_extend.py
index cf0c6b7..0d0a967 100644
--- a/main/core/auto_extend.py
+++ b/main/core/auto_extend.py
@@ -27,9 +27,8 @@ from libs.conf.queryconfig import query_config
 from main.common.aes_decode import AESEncryAndDecry
 from main.common.wsdm_thread import WsdmThread
 from main.monitor.workerstatus import QueryOBSWorker
-
-multi_arch = ["aarch64", "x86"]
-worker_excluded = '/usr/li-wen/libs/conf/excluded_workers.yaml'
+from main.common.Constant import EXCLUDED_WORKERS
+from main.common.Constant import MULTI_ARCH
 
 class AutoExtendWorker(object):
     """
@@ -65,7 +64,7 @@ class AutoExtendWorker(object):
         new_workers_info = []
         thread_arch_level = []
         
-        for idx, arch in enumerate(multi_arch):
+        for idx, arch in enumerate(MULTI_ARCH):
             for level_idx in range(3):
                 level = 'l' + str(level_idx + 1)
                 try:
@@ -75,8 +74,8 @@ class AutoExtendWorker(object):
                     log_check.error(f"reason: {err}")
                     continue
 
-                default_instances = query_config(self.worker_conf[level_idx], "instances")
-                max_limit = int(query_config(self.worker_conf[level_idx], "max_limit"))
+                default_instances = query_config.get_value(self.worker_conf[level_idx], "instances")
+                max_limit = int(query_config.get_value(self.worker_conf[level_idx], "max_limit"))
 
                 # 计算得到预申请的数目
                 if schedule <= idle: 
@@ -135,8 +134,7 @@ class AutoExtendWorker(object):
         result_release = []
         excluded_workers = self.excluded_workers # 不在评估是否释放的worker范围
 
-        for idx in range(2):
-            arch = multi_arch[idx]
+        for idx, arch in enumerate(MULTI_ARCH):
             try:
                 aarh_idle_workers = idle_workers[idx].get(arch)
             except AttributeError as err:
@@ -177,7 +175,6 @@ class AutoExtendWorker(object):
         """
         release_worker_ip = idle_workers
         count = 0
-        release_worker = {}
 
         while release_worker_ip and count < num_check:
             workers_instance_state = self.worker_query.get_worker_instance(release_worker_ip)
@@ -222,10 +219,10 @@ class AutoExtendWorker(object):
         -----------
         """
         apply_worker = []
-        default_instances = query_config(self.worker_conf[level_idx], "instances")
-        vcpus = query_config(self.worker_conf[level_idx], "vcpus")
-        ram = query_config(self.worker_conf[level_idx], "ram")
-        jobs = query_config(self.worker_conf[level_idx], "jobs")
+        default_instances = query_config.get_value(self.worker_conf[level_idx], "instances")
+        vcpus = query_config.get_value(self.worker_conf[level_idx], "vcpus")
+        ram = query_config.get_value(self.worker_conf[level_idx], "ram")
+        jobs = query_config.get_value(self.worker_conf[level_idx], "jobs")
         level = 'l' + str(level_idx + 1)
         log_check.info(f"{thread_name}------Apply new workers: arch: {arch}, flavor: {level}, number: {num}")
         result = self.server.create(arch, level, passwd, num)
@@ -274,7 +271,7 @@ class AutoExtendWorker(object):
 
         delete_result = self.server.delete(ips)
         log_check.info(f"{thread_name}-------1st Call HWCloud delete:{delete_result} \n {ips}")
-        
+
         clean_result = self.worker_query.delete_down_obsworker(hostnames)
         log_check.info(f"{thread_name}--------2nd Call clean up:{clean_result} \n {hostnames}")
 
@@ -292,8 +289,8 @@ class AutoExtendWorker(object):
         -----------
         """
         password = "**********"
-        key = query_config("AES_Decry_Conf", "key")
-        decryption_file = query_config("AES_Decry_Conf", "worker_login")
+        key = query_config.get_value("AES_Decry_Conf", "key")
+        decryption_file = query_config.get_value("AES_Decry_Conf", "worker_login")
 
         if not os.path.isfile(decryption_file):
             log_check.error("NO decrption file")
@@ -316,7 +313,7 @@ class AutoExtendWorker(object):
         -----------
         """
         result_workers = dict()
-        with open(worker_excluded, encoding='utf-8') as f:
+        with open(EXCLUDED_WORKERS, encoding='utf-8') as f:
             try:
                 excluded_workers_yaml = yaml.safe_load(f)
             except yaml.scanner.ScannerError as err:
diff --git a/main/wsdm.py b/main/wsdm.py
index 748399f..e95613a 100644
--- a/main/wsdm.py
+++ b/main/wsdm.py
@@ -17,8 +17,9 @@
 This is main entrance
 """
 import sys
-sys.path.append('/usr/li-wen')
+# sys.path.append('/usr/li-wen')
 import csv
+import copy
 import codecs
 import configparser
 from logging import log
@@ -31,14 +32,14 @@ from main.monitor.project import QueryProject
 from libs.conf.queryconfig import query_config
 from libs.log.logger import log_check
 
-interval_for_check_schedule = int(query_config("Monitor", "interval_for_check_schedule"))
-interval_for_cycle_check_new_worker = int(query_config("Monitor", "interval_for_cycle_check_new_worker"))
-num_for_check_reserved_worker = int(query_config("Monitor", "num_for_check_reserved_worker"))
-interval_for_check_reserved_worker = int(query_config("Monitor", "interval_for_check_reserved_worker"))
+interval_for_check_schedule = int(query_config.get_value("Monitor", "interval_for_check_schedule"))
+interval_for_cycle_check_new_worker = int(query_config.get_value("Monitor", "interval_for_cycle_check_new_worker"))
+num_for_check_reserved_worker = int(query_config.get_value("Monitor", "num_for_check_reserved_worker"))
+interval_for_check_reserved_worker = int(query_config.get_value("Monitor", "interval_for_check_reserved_worker"))
 # 获取当前关联的project列表
-projects = query_config("Monitor", "projects").split()
+projects = query_config.get_value("Monitor", "projects").split()
 # 获取project所属的backend
-repo_server = query_config("Monitor", "repo_server")
+repo_server = query_config.get_value("Monitor", "repo_server")
 
 obs_worker = QueryOBSWorker()
 project = QueryProject()
@@ -81,8 +82,8 @@ def check_worker_enable(workers, passwd, interval, checK_start):
 
     flag = "Timeout, end verification."
     check_end = time.monotonic()
-    abnormal_workers = []
-    temp_workers = workers
+    abnormal_workers = workers[:]
+
     # {"ip", "arch", "level", "vcpus", "ram", "jobs", "instances"}
     while (check_end - checK_start) < interval_for_check_schedule:
         if not workers:
@@ -90,42 +91,24 @@ def check_worker_enable(workers, passwd, interval, checK_start):
             break
         for worker in workers:
             wait_for_check_config = dict()
-            try:
-                ip = worker.get("ip")
-                wait_for_check_config["vcpus"] = worker.get("vcpus")
-                wait_for_check_config["ram"] = worker.get("ram")
-                wait_for_check_config["jobs"] = worker.get("jobs")
-                wait_for_check_config["instances"] = worker.get("instances")
-                wait_for_check_config["repo_server"] = repo_server
-            except (AttributeError, KeyError) as err:
-                log_check.error(f"reason: {err}")
-                continue
-            except (configparser.NoOptionError, configparser.NoSectionError) as err:
-                log_check.error(f"reason: {err.message}")
-                continue
+            ip = worker.get("ip")
+            wait_for_check_config["vcpus"] = worker.get("vcpus")
+            wait_for_check_config["ram"] = worker.get("ram")
+            wait_for_check_config["jobs"] = worker.get("jobs")
+            wait_for_check_config["instances"] = worker.get("instances")
+            wait_for_check_config["repo_server"] = repo_server
 
             service_status = obs_worker.check_service(ip, passwd) # 校验worker核心服务的状态
             config_same = obs_worker.check_worker_config(ip, passwd, wait_for_check_config)
 
             if service_status and config_same:
-                temp_workers.remove(worker) # 校验OK的包从列表中去掉
-
-        workers = temp_workers
+                abnormal_workers.remove(worker) # drop workers that passed verification from the list
+
+        workers = abnormal_workers[:]
         log_check.info(f"After {interval}s, we go on next checking workers' service and configuration......")
         time.sleep(interval)
         check_end = time.monotonic()
-        
     log_check.info(flag)
-
-    for worker in temp_workers:
-        try:
-            abnormal_workers.append( worker.get("ip"))
-        except (AttributeError, KeyError) as err:
-            log_check.error(f"reason: {err}")
-            continue
-        except (configparser.NoOptionError, configparser.NoSectionError) as err:
-            log_check.error(f"reason: {err.message}")
-            continue
         
     return abnormal_workers
     
@@ -201,12 +184,14 @@ def main_progrecess():
             log_check.debug(f"Successfully create these workers:{apply_worker} , then check their status:")
 
             if (time.monotonic() - start) >= interval_for_check_schedule:
+                log_check.warning("Timeout, terminate the verification of the newly created worker status!")
                 continue
             
             # 校验新申请的worker的可用状态,abnormal_worker表示校验后异常或者不满足规格要求的worker
             abnormal_workers = check_worker_enable(apply_worker, passwd, interval_for_cycle_check_new_worker, start)
 
             if (time.monotonic() - start) >= interval_for_check_schedule:
+                log_check.warning("Timeout, terminate the delete of the newly created but abnormal workers!")
                 continue
 
             # 释放未达到可用状态的worker并清理后台相关信息
-- 
Gitee