import os
import datetime
import random
import redis
from flask import jsonify
import pytz
import secrets
import string
from dataclasses import dataclass
# decode_responses must be set on the pool; redis.Redis ignores it when a
# connection_pool is supplied
r_pool = redis.ConnectionPool(host='172.168.244.227', port=6379, db=2, password='radiaTest1234', decode_responses=True)
r = redis.Redis(connection_pool=r_pool)
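# Hedged usage sketch (needs a reachable Redis server; the key/value below are
# illustrative): with decode_responses=True the client returns str, not bytes.
# r.set("demo_key", "v")
# r.get("demo_key")  # -> "v"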
class E:
def __init__(self, a, b):
self.a = a
self.b = b
class g:
def __init__(self, e):
self._a = e.a
self._b = e.b
def shuchu(self):
print(self._a, self._b)
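# e.g. g(E(1, 2)).shuchu() prints "1 2": g copies E's fields at construction.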
class A:
def __init__(self, body):
self._body = body
class B(A):
def __init__(self, auth, body):
self._auth = auth
super().__init__(body)
class C(B):
def work(self):
print(self._auth, self._body)
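# e.g. C(1, 3).work() prints "1 3": C reuses the B/A __init__ chain to wire
# _auth and _body before printing them.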
@dataclass
class Z:
h: int
w_solid: int = 0
e_min: float = 0.0
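# e.g. Z(2, w_solid=1) -> Z(h=2, w_solid=1, e_min=0.0); the dataclass
# decorator generates __init__/__repr__/__eq__ from the annotated fields.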
from enum import Enum
class BaseEnum(Enum):
@classmethod
def code(cls, attr):
if hasattr(cls, attr):
return getattr(cls, attr).value
else:
return None
class JobPriorityEnum(BaseEnum):
CVE_JOB = 1
RUN_TEST_JOB = 6
GENERAL = 7
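# Quick check of BaseEnum.code (safe to run at import time): lookup is by
# attribute name, returning the member's value, or None when missing.
assert JobPriorityEnum.code("CVE_JOB") == 1
assert JobPriorityEnum.code("MISSING") is None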
import tempfile
r_pool = redis.ConnectionPool(host='172.168.131.13', port=6379, db=2, password='radiaTest1234', decode_responses=True)
r = redis.StrictRedis(connection_pool=r_pool)
class RedisKey(object):
pmachine = lambda group_ip, pmachine_ip: f'pmachine_{group_ip}_{pmachine_ip}'
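# e.g. RedisKey.pmachine("172.168.131.1", "172.168.131.15")
# -> "pmachine_172.168.131.1_172.168.131.15"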
import queue
class Task(object):
def __init__(self, priority, name, sub_time):
self._priority = priority
self._name = name
self._time = sub_time
def __str__(self):
return "Task(priority={, name={, time={}).".format(self._priority, self._name, self._time)
def __lt__(self, other):
if self._priority != other._priority:
return self._priority < other._priority
else:
return self._time < other._time
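# Minimal sketch of Task with queue.PriorityQueue (names below are
# illustrative, not from the original file): __lt__ makes the queue pop the
# lowest priority value first, breaking ties by earlier submit time.
# tsq = queue.PriorityQueue()
# tsq.put(Task(JobPriorityEnum.code("GENERAL"), "general job", 2.0))
# tsq.put(Task(JobPriorityEnum.code("CVE_JOB"), "cve job", 1.0))
# print(tsq.get())  # -> Task(priority=1, name=cve job, time=1.0)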
class A():
@property
def fulei(self):
return 1
def install(self):
print(self.fulei)
class B(A):
@property
def fulei(self):
return 2
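# Property overriding check (safe to run at import time): install() resolves
# self.fulei dynamically, so B().install() would print 2 while A's prints 1.
assert A().fulei == 1 and B().fulei == 2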
from datetime import timedelta, datetime
from flask import app
import os
from subprocess import getoutput, getstatusoutput
import re
from urllib.parse import urlparse
class A:
x = 1
from openqa_client.client import OpenQA_Client
import datetime
import configparser
from urllib.parse import urlunparse
from subprocess import getstatusoutput
import time
import hmac
import hashlib
def _update_headers():
headers = {}
headers['Content-type'] = 'application/x-www-form-urlencoded'
headers['Accept'] = 'application/json'
headers['X-API-Key'] = '0CAB34B193629285'
timestamp = time.time()
api_hash = hmac.new('1D3E96A37823852E'.encode(),
'{0}{1}'.format('/api/v1/isos', timestamp).encode(),
hashlib.sha1)
    headers['X-API-Microtime'] = str(timestamp)  # header values must be str, not bytes
headers['X-API-Hash'] = api_hash.hexdigest()
return headers
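# Hedged usage sketch (host and params are placeholders taken from the
# commented experiments further below; sending this would hit the network):
# resp = requests.post("http://172.168.131.95/api/v1/isos",
#                      data=params, headers=_update_headers(), timeout=10)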
import http.client
import requests
import json
import datetime
class DateEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime.datetime):
return o.strftime("%Y-%m-%d %H:%M:%S")
        # note: datetime is a subclass of date, so the datetime branch above must come first
        elif isinstance(o, datetime.date):
return o.strftime("%Y-%m-%d")
else:
return super().default(o)
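# Round-trip check for DateEncoder (safe to run at import time):
assert json.dumps({"d": datetime.date(2022, 12, 1)}, cls=DateEncoder) == '{"d": "2022-12-01"}'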
import urllib
from datetime import datetime
t = 1
t1 = [2]
def djsafljdsaf(t, t1):
    t = 2          # rebinding the local name leaves the caller's int untouched
    t1.append(3)   # mutating the shared list is visible to the caller
djsafljdsaf(t, t1)
print(t, t1)  # -> 1 [2, 3]
class ScopeKey(object):
oneid = lambda org_id: {"a": org_id}
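# e.g. ScopeKey.oneid(42) -> {"a": 42}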
import io
import re
from flask import current_app
import openpyxl
import markdown
from lxml import etree
class MdUtil:
DEFAULT_MD_EXT = [
"markdown.extensions.extra",
"markdown.extensions.codehilite"
]
@staticmethod
def get_md_tables2html(md_content):
"""parse md table 2 lxml table list
Args:
md_content (str): content of markdown file
return:
list: [ lxml_table_1, lxml_table_2]
"""
try:
html_content = markdown.markdown(md_content, extensions=MdUtil.DEFAULT_MD_EXT)
except UnicodeDecodeError as ude:
current_app.logger.error(f"unsupported unicode in markdown file :{str(ude)}")
html_content = ""
except ValueError as ve:
current_app.logger.error(f"unsupported grammar in markdown file :{str(ve)}")
html_content = ""
if not html_content:
return []
html_etree = etree.HTML(html_content, parser=etree.HTMLParser(encoding="utf-8"))
table_list = html_etree.xpath("//table")
return table_list
@staticmethod
def md2html(md_content, file_path):
"""convert md text to html file
Args:
md_content (str): content of markdown file
file_path (str): html file path
"""
try:
html_content = markdown.markdown(md_content, output_format="html", extensions=MdUtil.DEFAULT_MD_EXT)
except UnicodeDecodeError as ude:
current_app.logger.error(f"unsupported unicode in markdown file :{str(ude)}")
html_content = ""
except ValueError as ve:
current_app.logger.error(f"unsupported grammar in markdown file :{str(ve)}")
html_content = ""
if not html_content:
return
        with io.open(file_path, "w", encoding="utf-8") as fhtml:
            fhtml.write(html_content)
@staticmethod
def get_md_tables2list(md_content, resolver):
"""parse md table 2 python list
Args:
md_content (str): content of markdown file
return:
list: [[md table 1],[md table 2]]
"""
table_list = MdUtil.get_md_tables2html(md_content)
tables_content_list = []
for table in table_list:
tables_content_list.append(
resolver(table).parse_table()
)
return tables_content_list
@staticmethod
def df2md(df, md_path):
"""convert pandas dataframe to md text
Args:
df (List(Dict[colname, value])): dataframe object loaded
md_path (str): markdown file path
Return:
filepath (str): the convert file save path
"""
md_content = ""
title = "|"
split_line = "|"
for col_name in df[0].keys():
title = f"{title} {col_name} |"
split_line = f"{split_line} -- |"
md_content += title + "\n"
md_content += split_line + "\n"
for row in df:
row_content = "|"
for col in row.values():
cell_content = str(col).replace("\n", "<br/>").replace("nan", "")
row_content += cell_content + "|"
md_content += row_content + "\n"
with open(md_path, "w") as file:
file.write(md_content)
return md_path
@staticmethod
def md2wb(md_content, wb_path, sheet_name):
"""convert md text to excel workbook
Args:
md_content (str): source markdown content which only has single table
wb_path (str): target workbook path (filepath should be valid)
sheet_name (str): name of the target sheet of saving workbook
Return:
filepath (str): the convert file save path
"""
        escape_dict = {
            "<br>": "\n",
            "<br/>": "\n",
            r"\<": "<",
            r"\$": r"\$",
            r"\|": "|",
        }
md_lines = md_content.split("\n")
        # locate the header row
header = 0
split_pattern = r"^(\|\s*-+\s*)+\|$"
for i in range(1, len(md_lines) - 1):
if re.match(split_pattern, md_lines[i]):
if len(md_lines[i - 1].strip("|").split("|")) == len(md_lines[i].strip("|").split("|")):
header = i - 1
title = md_lines[header].strip("|").split("|")
cols_num = len(title)
wb = openpyxl.load_workbook(wb_path)
ws = wb[sheet_name]
        # write the header cells
for col in range(cols_num):
ws.cell(1, col + 1).value = title[col]
        # build a pattern that matches valid table rows
        row_pattern = r"^" + "".join([
            r'(?:\|((?!\s*(?:grep|awk|sed|tee|sort|uniq|tail|more|less)).*[^\\])?)' for _ in range(cols_num)
        ]) + r"\|$"
row_index = 2
        def escape_content(cell_content):
            if not cell_content:
                cell_content = ''
            escaped = cell_content.strip()
            for key, value in escape_dict.items():
                escaped = escaped.replace(key, value)
            return escaped
for i in range(header + 2, len(md_lines)):
result = re.match(row_pattern, md_lines[i])
if result:
cells = result.groups()
                # unescape markdown line breaks and formatting
body = list(map(escape_content, cells))
                # write the row cells
if len(body) == cols_num:
for col in range(cols_num):
ws.cell(row_index, col + 1).value = body[col]
row_index += 1
wb.save(wb_path)
return wb_path
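# Hedged usage sketch for MdUtil (paths and content below are illustrative,
# not from the original file; md2wb needs an existing workbook at wb_path):
# md_text = "| name | arch |\n| -- | -- |\n| openEuler | x86_64 |"
# MdUtil.get_md_tables2html(md_text)          # -> [<Element table>]
# MdUtil.md2wb(md_text, "/tmp/report.xlsx", "Sheet1")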
def arr2tree(source, parent):
tree = []
for item in source:
if item['parent_key'] == parent:
if not item['leaf']:
item['children'] = arr2tree(source, item['key'])
tree.append(item)
return tree
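# arr2tree nests a flat, parent-keyed list; with the commented sample `arr`
# near the bottom of this file, arr2tree(arr, '0') groups '0-0-0' under '0-0'
# and so on, attaching children via item['children'] for non-leaf nodes.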
def md2tree(source, parent, level):
    """Recursively split rendered markdown html into a tree keyed by <h{level}> headings."""
    tree = {'data': {'text': parent}}
html_etree = etree.HTML(source, parser=etree.HTMLParser(encoding="utf-8"))
node_level = "h" + str(level)
node_next_level = "h" + str(level + 1)
nodes = html_etree.xpath("//" + node_level)
if level == 1:
if len(nodes) != 0:
search_content = "(.*)" + "<" + node_level + ">" + nodes[0].text + "</" + node_level + ">"
content = re.findall(search_content, source, re.DOTALL)
tree["data"].update({
"content": content[0]
})
else:
tree["data"].update({
"content": source
})
if nodes:
tree['children'] = list()
n = 1
for index in range(len(nodes)):
if n == len(nodes):
search_content = "<" + node_level + ">" + nodes[index].text + "</" + node_level + ">" + "(.*)"
# print(search_content)
next_content = re.findall(search_content, source, re.DOTALL)
next_etree = etree.HTML(next_content[0], parser=etree.HTMLParser(encoding="utf-8"))
next_nodes = next_etree.xpath("//" + node_next_level)
if len(next_nodes):
leaf = md2tree(next_content[0], nodes[index].text, level + 1)
pattern = "(.*)" + "<" + node_next_level + ">" + next_nodes[0].text + "</" + node_next_level + ">"
content = re.findall(pattern, next_content[0], re.DOTALL)
leaf["data"].update({
"content": content[0]
})
tree['children'].append(leaf)
else:
leaf = {'data': {'text': nodes[index].text, "content": next_content[0]}}
tree['children'].append(leaf)
else:
n += 1
search_content = "<" + node_level + ">" + nodes[index].text + "</" + node_level + ">" + "(.*?)" + "<" + node_level + ">" + nodes[index + 1].text + "</" + node_level + ">"
# print(search_content,type(source))
next_content = re.findall(search_content, source, re.DOTALL)
# print(next_content)
next_etree = etree.HTML(next_content[0], parser=etree.HTMLParser(encoding="utf-8"))
next_nodes = next_etree.xpath("//" + node_next_level)
if len(next_nodes):
leaf = md2tree(next_content[0], nodes[index].text, level + 1)
if n == 2:
pattern = "(.*)" + "<" + node_next_level + ">" + next_nodes[0].text + "</" + node_next_level + ">"
content = re.findall(pattern, next_content[0], re.DOTALL)
leaf["data"].update({
"content": content[0]
})
tree['children'].append(leaf)
else:
leaf = {'data': {'text': nodes[index].text, "content": next_content[0]}}
tree['children'].append(leaf)
return tree
import html2markdown
def tree2md(tree, parent, level):
if level == 1:
file_name = tree.get("data").get("text") + ".md"
if "content" in tree.get("data").keys():
parent = tree.get("data").get("content")
if "children" in tree.get("data").keys():
child = tree.get("data").get("children")
for item in child:
parent += "<h" + str(level) + ">" + item.get("data").get("text") + "</h" + str(level) + ">" + item.get("data").get("content")
if "children" in item.get("data").keys():
parent += tree2md(item, parent, level+1)
md_content = html2markdown.convert(parent)
path = os.path.join("/data/BlueCloud/host/wangdi/下载", file_name)
with open(path, "w") as file:
file.write(md_content)
return parent
from datetime import datetime, timezone
import base64
from Crypto.Cipher import AES
class FileAES:
def __init__(self):
self.key = "E7BC96E7A081E69C89E8AFAF".encode('utf-8')
self.mode = AES.MODE_ECB
def encrypt(self, text):
"""加密函数"""
file_aes = AES.new(self.key, self.mode)
text = text.encode('utf-8')
while len(text) % 16 != 0:
text += b'\x00'
en_text = file_aes.encrypt(text)
return str(base64.b64encode(en_text), encoding='utf-8')
def decrypt(self, text):
"""解密函数"""
file_aes = AES.new(self.key, self.mode)
text = bytes(text, encoding='utf-8')
text = base64.b64decode(text)
de_text = file_aes.decrypt(text)
        return_text = str(de_text, encoding='utf-8').strip('\x00')
return return_text
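# Round-trip check, safe to run at import time (the _aes name is just a demo;
# note ECB with zero padding is weak and strips trailing NULs from plaintext):
_aes = FileAES()
assert _aes.decrypt(_aes.encrypt("hello openEuler")) == "hello openEuler"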
# from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
# import requests
if __name__ == "__main__":
# print(datetime.now(tz=timezone.utc))
# start_time = datetime.strptime(
# datetime.now(),
# "%Y-%m-%d %H:%M:%S"
# )
# print(type(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
with open("/data/BlueCloud/host/wangdi/test.md", "r") as f:
content = f.read()
# # print(content)
html_content = markdown.markdown(content, extensions=MdUtil.DEFAULT_MD_EXT)
res = md2tree(html_content, "test", 1)
child = res.get("children")
print(child[4].get("data").get("content"))
# data = json.loads(res, ensure_ascii=False)
# if "data" in res.keys():
# print(json.loads(res, ensure_ascii=False))
# file_name = res.get("data").get("text")
# parent = str()
# tree2md(res, parent, 1)
#
# else:
# print("empty file")
# print(html_content)
# with open("/data/BlueCloud/host/wangdi/test.html","w") as f1:
# f1.write(html_content)
# res1= html_content.split("<h1>概述</h1>")
# print(res1)
# res = re.findall('(.*)<h1>概述</h1>', html_content, re.DOTALL)
# print(res)
# html_etree = etree.HTML(res[0], parser=etree.HTMLParser(encoding="utf-8"))
# print(html_etree)
# table_list = html_etree.xpath("//h2")
# print(table_list)
# tmp = ''
# n = 1
# for li in table_list:
# if n == 1:
# n += 1
# tmp = li.text
# elif n == len(table_list):
# search_content = "<h1>" + li.text + '</h1>' + "(.*?)"
# res = re.findall(search_content, html_content, re.DOTALL)
#
# print(search_content)
# else:
# search_content = "<h1>" + tmp + '</h1>' + "(.*?)" + "<h1>" + li.text + '</h1>'
# tmp = li.text
# res = re.findall(search_content, html_content, re.DOTALL)
# print(search_content)
#
# # print(li.text)
#
# print(table_list)
# arr = [
# {'key':'0-0','title':'0-0','parent_key':'0','leaf':False},
# {'key': '0-1', 'title': '0-1', 'parent_key': '0', 'leaf': False},
# {'key': '0-0-0', 'title': '0-0-0', 'parent_key': '0-0', 'leaf': False},
# {'key': '0-1-0', 'title': '0-1-0', 'parent_key': '0-1', 'leaf': True},
# {'key': '0-0-1', 'title': '0-0-1', 'parent_key': '0-0', 'leaf': True},
# {'key': '0-0-0-0', 'title': '0-0-0-0', 'parent_key': '0-0-0', 'leaf': True}
# ]
# result = arr2tree(arr,'0')
# print(json.dumps(result))
# org_id = "oneid"
# result = getattr(ScopeKey,org_id)
# print(type(result(1)))
# Z= []
# print(type("%20".join(Z)))
# zzz={'a':1}
# if 'a' in zzz:
# print(True)
# elif not hasattr(zzz,"b"):
# print(False)
# else:
# pass
# a= 100 or 200
# print(a)
# print(datetime.strptime("2022-12-01 12:01:03", "%Y-%m-%d %H:%M:%S"). \
# replace(tzinfo=pytz.timezone('Asia/Shanghai')))
# print(datetime.strptime("2022-12-01 12:01:03", "%Y-%m-%d %H:%M:%S"). \
# astimezone(pytz.timezone('Asia/Shanghai')))
# ttt=12.36
# print(type(ttt))
# print('a'<'b'<'c')
# m = {'a':{'b':1},"c":1}
# print(type(m.get('a')))
# N1=10
# N2=4
# print(divmod(N1,N2))
# zz='1234'
# print(zz[1:3:2])
# import requests
# rep=requests.get("http://121.36.84.172/dailybuild//openEuler-20.03-LTS-SP3/openeuler-2023-01-10-13-11-39/ISO/x86_64/")
# _r = requests.get(
# "https://172.168.131.93:21500/api/v1/product",
# headers={
# "content-type": "application/json;charset=utf-8",
# "authorization": "JWT eyJhbGciOiJIUzUxMiIsImlhdCI6MTY3Mzg1ODA2NywiZXhwIjoxNjc0MjE4MDY3fQ.qVte2KlKj3Dri06MHsw+5fJhKXMqNWLgFVPATnM6tWSceiZQlzC8qA16G/4YwV7a.KJrDCbmddGSmPnMdYnqD6mQ5Gi4LVcn0TBxOWzUy_OqoxYzWfpm27qhd7LUTrxutR3VQSFOETghU3UI6Kh2cBQ"
# },
# verify=False
# )
# print(_r.status_code)
# print(rep.status_code)
# test={'a':1}
# test.clear()
# if not test:
# print("True")
# a=1
# b=2
# c=3
# if not all((a,b,c)):
# print("true1")
# if test.get("c"):
# print("true")
# import base64
# f = open("/data/BlueCloud/host/wangdi/2023-02-01_11-09.png",'rb')
# ls_f = base64.b64encode(f.read())
# f.close()
# print(ls_f)
# print(time.time())
# headers = _update_headers()
# params = dict()
# params.update({
# 'TEST': "base_test",
# 'release': 'openEuler-22.09-x86_64-dvd',
# 'build': '2022-11-08-11-59-46',
# 'DISTRI': 'openEuler',
# 'VERSION': '22.09',
# 'ARCH': 'x86_64',
# 'FLAVOR': 'dvd'
# })
# p = urllib.parse.urlencode(params)
#
# con = http.client.HTTPConnection('172.168.131.95', port=80, timeout=10)
# # con.request('GET', '/api/v1/jobs/122747')
# con.request('POST', '/api/v1/isos', body=p, headers=headers)
# res = con.getresponse()
# resp = requests.request(
# 'POST',
# 'http://172.168.131.95:80/api/v1/isos',
# params=params,
# timeout=10,
# headers=headers
# )
    # print(res.status, res.reason)  # res here would be the http.client response from the commented request above
# r_str = res.read()
# print(r_str)
# r_dict = {'count': 4, 'failed': [], 'ids': [112515, 112516, 112517, 112518]}
# ids = r_dict['ids']
# z = json.loads(r_str)
# print(z.get("job").get("name").split("@")[0].split("2022-12-25-21-29-22-")[1])
# for i in eval("['ab','b']"):
# print(str(i))
# for j in range(2):
# break
# print(z.get("ids"))
# exitcode, out = getstatusoutput(
# "curl {}".format("http://121.36.84.172/dailybuild/openEuler-22.09/openeuler_ARM64/release_iso"))
# b = out.split("dailybuild")[1].replace("//","")
# print(b)
# d = re.findall(r'\d+-\d+-\d+-\d+-\d+-\d+', b.split("/")[1])
# print(d)
# c = out.split("/")[-1]
# print(c)
# print(os.path.join("http://172.168.131.94:9400/repo_list/dailybuild.repo", b, "", "a"))
# exitcode1, out1 = getstatusoutput("ls /home | grep '^openeuler-' | tail -n 1 | awk -F 'openeuler-' '{print $2}'")
# print(out1)
# print(os.path.dirname("/a/b/c"))
# print(re.findall(r'openeuler(-.*?)/', "http://172.168.131.94:9400/repo_list/dailybuild.repo//openEuler-22.03-LTS-SP1/openeuler-2022-11-22-19-22-50/ISO/aarch64/openEuler-22.03-LTS-SP1-aarch64-dvd.iso")[0])
# print(re.findall(r'(.*)I', "http://172.168.131.94:9400/repo_list/dailybuild.repo//openEuler-22.03-LTS-SP1/openeuler-2022-11-22-19-22-50/ISO/aarch64/openEuler-22.03-LTS-SP1-aarch64-dvd.iso"))
# exitcode12, out12 = getstatusoutput("wget -c http://172.168.131.94:9400/repo_list/dailybuild.repo/openEuler-22.09/openeuler-2022-11-07-20-55-22/ISO/x86_64/openEuler-22.09-x86_64-dvd.iso -O ./ceshi.iso && ls .")
# print(out12)
# client = OpenQA_Client(server='172.168.131.95', scheme="https")
# print(client.openqa_request('GET', 'jobs/108878'))
# params = urllib.urlencode({"a":1})
# print(params)
# print(client.openqa_request('POST', 'jobs', params={
# "name": "test_wangdi",
# "DISTRI": "openeuler",
# "ARCH": "x86_64",
# "BACKEND": "qemu",
# "FLAVOR": "dvd",
# "WORKER_CLASS": "qemu_x86_64",
# "HDDSIZEGB": "30",
# "ISO": "openEuler-22.09-2022-09-26-18-35-55-x86_64-dvd.iso",
# "ISO_MAXSIZE": "737280000",
# "MACHINE": "x86_64_VM",
# "NICMODEL": "virtio-net",
# "NICTYPE": "tap",
# "NICVLAN": "1,1,1",
# "NICMAC": "e0:9f:41:41:41:69",
# "NUMDISKS": "1",
# "PUBLISH_HDD_1": "openEuler-Server-22.09-x86_64.qcow2",
# "QEMU": "x86_64",
# "QEMUCPU": "host",
# "QEMUCPUS": "4",
# "QEMUMACHINE": "accel=kvm",
# "QEMURAM": "4096",
# "ROOT_PASSWORD": "openEuler12#$",
# "VGA": "std",
# "VIRTIO_CONSOLE": "1",
# "VERSION": "22.09",
# "TAPSCRIPT": "/etc/qemu-ifup",
# "BUILD": "2022-09-26-18-35-55",
# "TEST": "guimode_x86_001",
# "INSTALL": "1",
# "TESTCASES": "install_guimode_001"
# }))
# config = configparser.ConfigParser()
# test = urlunparse(("https", "172.168.131.95", "", "", "a=1&b=2", ""))
# print(config.get(test, "a"))
# st = "2022:11:07:17:45:19"
# ed = "2022:11:07:18:13:10"
# st_1 = "2022:11:07:18:36:25"
# ed_1 = "2022:11:07:19:04:09"
# st_2 = "2022:11:07:08:59:40"
# ed_2 = "2022:11:07:09:27:21"
# st_3 = "2022:11:07:09:44:00"
# ed_3 = "2022:11:07:09:56:28"
# st_4 = "2022:11:07:10:07:49"
# ed_4 = "2022:11:07:10:20:14"
# st_5 = "2022:11:07:10:23:01"
# ed_5 = "2022:11:07:10:35:28"
# st_6 = "2022:11:07:10:49:38"
# ed_6 = "2022:11:07:10:57:18"
# st_7 = "2022:11:07:11:09:33"
# ed_7 = "2022:11:07:11:17:10"
# st_8 = "2022:11:07:11:18:31"
# ed_8 = "2022:11:07:11:26:09"
# st_9 = "2022:11:07:18:08:23"
# ed_9 = "2022:11:07:19:14:50"
# st1 = datetime.datetime.strptime(st_9, "%Y:%m:%d:%H:%M:%S")
# ed1 = datetime.datetime.strptime(ed_9, "%Y:%m:%d:%H:%M:%S")
# seconds = (ed1 - st1).seconds
# print(seconds)
# a = round(57 / 509, 2)
# print(a)
# for i in range(10):
# print(i)
# if i == 9:
# print("end")
# teste = {"a": 1, "b": 2}
# teste.pop("a")
# print(teste)
# from urllib import request
# request.urlopen("http://121.36.84.172/dailybuild/openEuler-22.03-LTS-Next/openeuler_ARM64/release_iso")
# print(os.path.join("/a/b", "b.log", "c/", "d", ""))
# max_endtime = (
# datetime.now(pytz.timezone('Asia/Shanghai'))
# + timedelta(days=7)
# )
# print(type(max_endtime))
# if "1970-01-01 08:00:00" > max_endtime.strftime("%Y-%m-%d %H:%M:%S"):
# print(True)
# if datetime.strptime("2022-12-01 08:00:00", "%Y-%m-%d %H:%M:%S").replace(
# tzinfo=pytz.timezone('Asia/Shanghai')).__gt__(max_endtime):
# print(True)
# test = datetime.strptime("2022-11-12 00:00:00", "%Y-%m-%d %H:%M:%S")
# print(type(test), test)
# a = [1]
# b = [2]
# a.extend(b)
# print(a)
#
# c = list()
# d = dict()
# for i in range(3):
# # d.update({
# # 'test': i
# # })
# c.append(dict(test=i))
# print(c)
# url = "http://139.9.114.65:9400/repo_list/mugen.mirror/iteration/openEuler/openEuler-22.09/round-2/aarch64/openEuler-22.09.aarch64.qcow2"
# o = urlparse(url)
# print(o.path)
# print(dict(a=1))
# print("/var".join(o.path))
# block = str()
# print(block)
# print(dir(int))
# print(dir(type))
# setattr("")
# print(dir(A))
# dic = dict()
# dic.setdefault('a', []).append(1)
# dic.setdefault('a', []).append(2)
# dic.setdefault('b', []).append(1)
# print(dic)
# try:
# a = 1
# except:
# print("no")
# print(a)
# _, result = getstatusoutput("ls "
# "-ll")
# print(type(result))
# list = result.split("\n")
# for x in result:
# print(re.search(r'(\w\d+)\n', x).group(1))
# if "running" in "- 22-08-17-1660725979.2064073-8tpr3zd6di running":
# print("ok")
# else:
# print("no")
# print(re.findall(r'\w+-\w+-\w+-\w+.\w+-\w+', "- 22-08-17-1660725979.2064073-8tpr3zd6di shut off"))
# print(os.path.join(os.path.dirname(__file__),"worker/utils/config_bash.sh"))
# a = [1, 2, 3]
# for _ in range(len(a)):
# print(1)
# try:
# z = 1/1
# except RuntimeError as e:
# print(e)
# # continue
# finally:
# print("bizhixing")
# continue
# a = {"a": 1}
# print("a {}".format(a))
# print([1,2,3].pop())
# print(JobPriorityEnum.code("CVE_JOB"))
# print(type(app))
# B().install()
# print(tsq.get())
# file = tempfile.mkstemp()[1]
# print(file)
# r.delete("1")
# st = Z(2, w_solid=1)
# print(st.h)
# print(round(float("10000.1"),2))
# print(random.shuffle([secrets.choice(string.ascii_letters) for _ in range(3)]
# + [secrets.choice(string.digits) for _ in range(3)]
# + [secrets.choice('!@#$%^&*') for _ in range(2)]
# )
#
# )
# r.set(RedisKey.pmachine('172.168.131.15'),0.0)
# print(r.get(RedisKey.pmachine(str(1),'172.168.131.15')))
# if os.path.exists(file[1])
# print("jinlaile")
# os.remove(file)
# print(file[1])
# C(1, 3).work()
# a="nimei"
# print("virsh dumpxml %s "
# "| grep -Pzo \"<interface type='bridge'>[\s\S] *<mac address.*\" "
# "|grep -Pzo '<mac address.*' "
# "| awk -F\\' '{print $2}' "
# "| head -1"
# % a)
# for i in range(1):
# print(i)
# print((datetime.datetime.now()+datetime.timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S"))
# print(datetime.datetime.now(tz=pytz.timezone('Asia/Shanghai')))
# import urllib.request
# from bs4 import BeautifulSoup
# import re
# html_doc = "https://mirrors.cloud.tencent.com/centos-vault/centos/8.4.2105/AppStream/Source/SPackages/"
# req = urllib.request.Request(html_doc)
# webpage = urllib.request.urlopen(req)
# html = webpage.read()
    # soup = BeautifulSoup(html, 'html.parser')  # document object
#
    # # find <a> tags; note soup.a only returns the first match
    # # print(soup.a)  # <a class="sister" href="http://example.com/elsie" rel="external nofollow" rel="external nofollow" id="xiaodeng"><!-- Elsie --></a>
#
# total_src = []
# total_name = []
# for k in soup.find_all('a'):
    # total_src.append(k['href'])  # collect each <a> tag's href value
# total_src.pop(0)
# for name in total_src:
# total_name.append(name.rsplit(".", 2)[0].rsplit("-", 2)[0])
# set_total_name = set(total_name)
# print(set_total_name)
#
# unique_list = []
# for i in set_total_name:
# print(i)
# group_name=[]
# for j in total_src:
# if j.startswith(i):
# group_name.append(j)
# # total_src.remove(j)
# if len(group_name) == 1:
# unique_list.append(group_name[0])
# else:
# unique_list.append(min(group_name))
# print(unique_list)
# a=','.join(unique_list)
# os.system("wget -r https://mirrors.cloud.tencent.com/centos-vault/centos/8.4.2105/AppStream/Source/SPackages/ -A {}".format(a))
# # print(total_src)