# Start-up driver for a Dockerized Hadoop HA cluster: reads the topology from
# hacluster.xml and brings up ZooKeeper, HDFS, and YARN via `docker exec`.
from xml.etree.ElementTree import ElementTree,Element
import subprocess
def read_xml(in_path):
    """Load and parse the XML document at *in_path*, returning an ElementTree."""
    # ElementTree(file=...) parses the file in the constructor, equivalent to
    # creating an empty tree and calling .parse(in_path).
    return ElementTree(file=in_path)
def if_match(node, kv_map):
    """Return True iff *node* carries every attribute in *kv_map* with the given value."""
    return all(node.get(key) == value for key, value in kv_map.items())
def find_nodes(tree, path):
    """Return all elements of *tree* matching the XPath-style *path*."""
    matches = tree.findall(path)
    return matches
def get_node_by_keyvalue(nodelist, kv_map):
    """Filter *nodelist*, keeping only nodes whose attributes match every kv_map entry.

    An empty kv_map matches every node.
    """
    # Inlined attribute-match predicate (same check as if_match).
    return [node for node in nodelist
            if all(node.get(key) == value for key, value in kv_map.items())]
def start_zkserver(node):
    """Start the ZooKeeper server inside the docker container named by node's 'host' attribute.

    Runs `docker exec -it <host> zkServer.sh start` and returns its exit code.
    """
    start_cmd = ["docker", "exec", "-it", node.attrib["host"], "zkServer.sh", "start"]
    # print() call form runs on both Python 2 and 3; the original `print start_cmd`
    # statement is a SyntaxError on Python 3.
    print(start_cmd)
    returncode = subprocess.call(start_cmd)
    return returncode
def start_dfs(node):
    """Start the whole HDFS cluster from the docker container named by node's 'host' attribute.

    Runs `docker exec -it <host> start-dfs.sh` and returns its exit code.
    """
    start_cmd = ["docker", "exec", "-it", node.attrib["host"], "start-dfs.sh"]
    # print() call form runs on both Python 2 and 3; the original `print start_cmd`
    # statement is a SyntaxError on Python 3.
    print(start_cmd)
    returncode = subprocess.call(start_cmd)
    return returncode
def zkcluster_run(nodelist):
    """Start a ZooKeeper server on every node in *nodelist*."""
    for zk_node in nodelist:
        start_zkserver(zk_node)
def start_yarn(node):
    """Start the YARN cluster from the docker container named by node's 'host' attribute.

    Runs `docker exec -it <host> start-yarn.sh` and returns its exit code.
    """
    start_cmd = ["docker", "exec", "-it", node.attrib["host"], "start-yarn.sh"]
    # print() call form runs on both Python 2 and 3; the original `print start_cmd`
    # statement is a SyntaxError on Python 3.
    print(start_cmd)
    returncode = subprocess.call(start_cmd)
    return returncode
def start_resourcemanager(node):
    """Start a (standby) ResourceManager daemon inside the container named by node's 'host'.

    Runs `docker exec -it <host> yarn-daemon.sh start resourcemanager` and
    returns its exit code.
    """
    start_cmd = ["docker", "exec", "-it", node.attrib["host"],
                 "yarn-daemon.sh", "start", "resourcemanager"]
    # print() call form runs on both Python 2 and 3; the original `print start_cmd`
    # statement is a SyntaxError on Python 3.
    print(start_cmd)
    returncode = subprocess.call(start_cmd)
    return returncode
def yarn_run(nodelist):
    """Bring up YARN across the given resourcemanager nodes.

    start-yarn.sh on the first node launches the full cluster (RM + NMs);
    every subsequent node only gets its own resourcemanager daemon started.
    """
    # enumerate replaces the original manual `i = i + 1` counter.
    for index, node in enumerate(nodelist):
        if index == 0:
            start_yarn(node)
        else:
            start_resourcemanager(node)
if __name__ == "__main__":
    # Parse the cluster topology description.
    cluster = read_xml("hacluster.xml")
    clusterlist = find_nodes(cluster, "cluster")

    # 1> ZooKeeper ensemble: start zkServer on every dockerized zk node.
    zkcluster = get_node_by_keyvalue(clusterlist, {"type": "zk"})
    zkdockers = get_node_by_keyvalue(zkcluster[0].findall("node"),
                                     {"platform": "docker"})
    zkcluster_run(zkdockers)

    # 2> HDFS: run start-dfs.sh once, from the first dockerized namenode.
    hdfscluster = get_node_by_keyvalue(clusterlist, {"type": "hdfs"})
    hdfsdockers = get_node_by_keyvalue(hdfscluster[0].findall("node"),
                                       {"platform": "docker"})
    namenodes = get_node_by_keyvalue(hdfsdockers, {"type": "namenode"})
    start_dfs(namenodes[0])

    # 3> YARN: start-yarn.sh on the first resourcemanager, standby RMs on the rest.
    yarncluster = get_node_by_keyvalue(clusterlist, {"type": "yarn"})
    yarndockers = get_node_by_keyvalue(yarncluster[0].findall("node"),
                                       {"platform": "docker"})
    resourcemanagers = get_node_by_keyvalue(yarndockers, {"type": "resourcemanager"})
    yarn_run(resourcemanagers)