Xinli_Shi/Top-k influence node acquisition in a complex network model

main.py 12.03 KB
张帆 committed 2023-05-09 08:49 · update main.py
import os
import random
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import sir_model
import kshell_modified
import time
def make_graph(path):
    # Pick a subset of edges and build a directed graph.
    G = nx.DiGraph()
    with open(path, 'r') as f:
        # Commented-out variant: randomly sample 0.1% of the edges.
        # lines = f.readlines()
        # random.seed()
        # sample_nums = int(len(lines) * 0.001)
        # lines = random.sample(lines, sample_nums)
        # lines = [line.strip() for line in lines]
        # for line in lines:
        #     node_pair = line.split(' ')
        #     source_node = int(node_pair[0])
        #     target_node = int(node_pair[1])
        #     G.add_edge(source_node, target_node)
        num = 0
        for line in f:
            if "\t" in line:
                line = line.replace("\t", " ")
            v = line.strip().split(" ")
            if len(v) < 2 or v[0].startswith("#"):
                continue  # skip blank, malformed, or comment lines
            source_node = int(v[0])
            target_node = int(v[1])
            G.add_edge(source_node, target_node)
            num += 1
            if num == 500:
                break  # cap at 500 edges to keep the demo graph small
    # Remove isolated nodes. Iterate over a list copy: removing nodes
    # while iterating over the graph itself raises a RuntimeError.
    for node in list(G.nodes()):
        if G.degree(node) == 0:
            G.remove_node(node)
    print(G)
    return G
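
# A minimal usage sketch for make_graph, assuming an edge-list file where
# each line holds a "source target" integer pair (space- or tab-separated;
# './edges.txt' is a hypothetical file name):
#
#     G = make_graph('./edges.txt')
#     print(G.number_of_nodes(), G.number_of_edges())
#
# Only the first 500 edges are read, so the resulting graph is a small
# subgraph of the full dataset.
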
def Degree_Centrality_of_Nodes(G):
    starttime = time.time()
    # Degree centrality of every node.
    if len(G) <= 1:
        return {n: 1 for n in G}
    denominator = 1.0 / (len(G) - 1.0)
    # Normalize by the maximum possible degree, n - 1.
    degree_centrality = {node_i: degree_i * denominator for node_i, degree_i in G.degree()}
    # Returns a dict mapping node -> degree centrality.
    endtime = time.time()
    print('Degree centrality: %f seconds' % (endtime - starttime))
    return degree_centrality
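
# Worked example for the normalization above: in a graph with len(G) = 11
# nodes, a node with degree 5 gets centrality 5 / (11 - 1) = 0.5. Up to
# floating-point rounding this matches the built-in (comparison sketch):
#
#     assert Degree_Centrality_of_Nodes(G) == nx.degree_centrality(G)
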
def Closeness_Centrality_of_Nodes(G):
    starttime = time.time()
    # Closeness centrality of every node.
    nodes = G.nodes()
    Closeness_Centrality = {}
    nodesum = nx.number_of_nodes(G) - 1
    # Dict mapping node -> closeness centrality.
    for i in nodes:
        sum_lengths = 0
        # Sum of shortest-path lengths from node i to every other node.
        for j in nodes:
            if i != j:
                try:
                    sum_lengths += nx.dijkstra_path_length(G, source=i, target=j)
                except nx.NetworkXNoPath:
                    # Some node pairs are unreachable, which raises an exception.
                    continue
        # Closeness is the reciprocal of the average distance,
        # (n - 1) / sum of distances, not its inverse; 0 if i reaches no one.
        closeness_i = nodesum * 1.0 / sum_lengths if sum_lengths > 0 else 0.0
        Closeness_Centrality[i] = closeness_i
    endtime = time.time()
    print('Closeness centrality: %f seconds' % (endtime - starttime))
    return Closeness_Centrality
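
# Cross-check sketch: on a DiGraph, nx.closeness_centrality measures
# inward distances, so compare against the reversed graph. This variant
# always divides by n - 1 while skipping unreachable pairs, so the two
# agree exactly only when the graph is strongly connected:
#
#     ref = nx.closeness_centrality(G.reverse(), wf_improved=False)
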
def Compute_Degree(G):
    # Convert G.degree() into a plain dict for the coreness computation below.
    return dict(G.degree())

def core_number(G):
    starttime = time.time()
    # k-shell (coreness) decomposition.
    newG = G.copy()
    node_to_core = {}
    # key: node, value: coreness (shell index)
    ks = 1
    while newG.nodes():
        k_core_nodes = []
        node_to_degree = Compute_Degree(newG)
        # key: node, value: degree
        current_k = min(node_to_degree.values())
        # Start k at the current minimum degree instead of 1.
        while True:
            for k, v in node_to_degree.items():
                if v <= current_k:
                    # <= rather than ==: removals in this pass can push a
                    # node's degree below current_k, and such a node still
                    # belongs to the current shell.
                    k_core_nodes.append(k)
                    newG.remove_node(k)
            node_to_degree = Compute_Degree(newG)
            # Refresh degrees and keep peeling until no node of degree
            # <= current_k remains.
            if not node_to_degree or min(node_to_degree.values()) > current_k:
                break
        for eachnode in k_core_nodes:
            node_to_core[eachnode] = ks
        ks += 1
    endtime = time.time()
    print('k-shell: %f seconds' % (endtime - starttime))
    return node_to_core
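
# Note on the shell index: shells here are numbered sequentially
# (ks = 1, 2, 3, ...), so the values can differ from nx.core_number(G),
# which labels each shell with its degree threshold; the *ranking* of
# nodes, which the fused score in main() consumes, is the same.
# A comparison sketch:
#
#     ours = core_number(G)
#     ref = nx.core_number(G)   # degree-threshold labels, not 1, 2, 3, ...
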
def pagerank(G, alpha=0.7, max_iter=300, tol=1.0e-6, weight="weight"):
    # PageRank value of every node.
    starttime = time.time()
    if len(G) == 0:
        return {}
    D = G
    W = nx.stochastic_graph(D, weight=weight)
    # Right-stochastic copy of the graph: out-edge weights sum to 1.
    N = W.number_of_nodes()
    pr_vec = dict.fromkeys(W, 1.0 / N)
    # Initial PageRank vector.
    unip_vec = dict.fromkeys(W, 1.0 / N)
    # Uniform personalization vector.
    dangling_weights = unip_vec
    # How dangling-node mass is redistributed over the graph.
    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
    for i in range(max_iter):
        # Power iteration.
        xlast = pr_vec
        pr_vec = dict.fromkeys(xlast.keys(), 0)
        # Reset the PageRank vector.
        danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
        # Total PageRank mass held by dangling nodes.
        for n in pr_vec:
            for nbr in W[n]:
                pr_vec[nbr] += alpha * xlast[n] * W[n][nbr][weight]
                # Distribute node n's PageRank mass to its out-neighbors.
            pr_vec[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * unip_vec.get(n, 0)
            # Add node n's share of the dangling mass and the teleport term.
        err = sum(abs(pr_vec[n] - xlast[n]) for n in pr_vec)
        if err < N * tol:
            # Converged; report timing before returning.
            endtime = time.time()
            print('PageRank: %f seconds' % (endtime - starttime))
            return pr_vec
    raise nx.PowerIterationFailedConvergence(max_iter)
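
# The update implemented above is the standard power iteration
#
#     PR(v) = (1 - alpha) / N
#             + alpha * ( sum over in-neighbors u of PR_last(u) * w(u, v)
#                         + sum over dangling u of PR_last(u) / N )
#
# with w(u, v) the row-normalized edge weight. Given the same alpha it
# should agree with nx.pagerank(G, alpha=0.7) up to the tolerance.
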
def hits(G, max_iter=300, tol=1.0e-6):
    starttime = time.time()
    # Hub and authority score of every node.
    if len(G) == 0:
        # Empty graph.
        return {}, {}
    node_to_hub = dict.fromkeys(G, 1.0 / G.number_of_nodes())
    for i in range(max_iter):
        node_to_hub_last = node_to_hub
        node_to_hub = dict.fromkeys(node_to_hub_last.keys(), 0)
        node_to_authority = dict.fromkeys(node_to_hub_last.keys(), 0)
        for n in node_to_hub:
            for nbr in G[n]:
                # Authority: weighted sum of hub scores of in-neighbors.
                node_to_authority[nbr] += node_to_hub_last[n] * G[n][nbr].get("weight", 1)
        for n in node_to_hub:
            for nbr in G[n]:
                # Hub: weighted sum of authority scores of out-neighbors.
                node_to_hub[n] += node_to_authority[nbr] * G[n][nbr].get("weight", 1)
        s = 1.0 / max(node_to_hub.values())
        for n in node_to_hub:
            node_to_hub[n] *= s
        s = 1.0 / max(node_to_authority.values())
        for n in node_to_authority:
            node_to_authority[n] *= s
        err = sum(abs(node_to_hub[n] - node_to_hub_last[n]) for n in node_to_hub)
        if err < tol:
            break
    else:
        # for/else: reached only when max_iter is exhausted without converging.
        raise nx.PowerIterationFailedConvergence(max_iter)
    s = 1.0 / sum(node_to_authority.values())
    for n in node_to_authority:
        node_to_authority[n] *= s
    s = 1.0 / sum(node_to_hub.values())
    for n in node_to_hub:
        node_to_hub[n] *= s
    endtime = time.time()
    print('HITS: %f seconds' % (endtime - starttime))
    return node_to_hub, node_to_authority
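
# Comparison sketch: nx.hits(G) normalizes the same way (each score
# vector sums to 1), so, assuming both converge, the results should be
# close:
#
#     hubs, auths = hits(G)
#     ref_hubs, ref_auths = nx.hits(G)
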
def func_degree_centrality(G):
    # Degree centrality, sorted in descending order.
    dc = nx.algorithms.centrality.degree_centrality(G)
    return sorted(dc.items(), key=lambda x: x[1], reverse=True)

def func_closeness_centrality(G):
    # Closeness centrality, sorted in descending order.
    dc = list(Closeness_Centrality_of_Nodes(G).items())
    return sorted(dc, key=lambda x: x[1], reverse=True)

def func_fused(fused_dict):
    # Fused influence scores, sorted in descending order. Note the
    # .items() call: list(fused_dict) alone would yield only the keys.
    dc = list(fused_dict.items())
    return sorted(dc, key=lambda x: x[1], reverse=True)
def main(G):
    Degree_Centrality = Degree_Centrality_of_Nodes(G)
    closeness = Closeness_Centrality_of_Nodes(G)
    betweenness = nx.betweenness_centrality(G)
    core = core_number(G)
    pageranks = pagerank(G)
    hubs, authorities = hits(G)
    # Per-node attributes used by the fused score.
    fused = dict()
    # Fused algorithm: a weighted sum of the attributes gives each node
    # a single averaged influence value.
    for node in G.nodes:
        deg = Degree_Centrality[node]
        cl = closeness[node]
        bet = betweenness[node]
        co = core[node]
        pr = pageranks[node]
        auth = authorities[node]
        M = 0.05 * deg + 0.15 * cl + 0.1 * bet + 0.3 * co + 0.25 * pr + 0.15 * auth
        fused[node] = M
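    # The weights (0.05 + 0.15 + 0.1 + 0.3 + 0.25 + 0.15 = 1.0) form a
    # convex combination. A worked example with hypothetical values
    # deg=0.4, cl=0.5, bet=0.1, co=3, pr=0.02, auth=0.3:
    #     M = 0.02 + 0.075 + 0.01 + 0.9 + 0.005 + 0.045 = 1.055
    # Note that the coreness term tends to dominate, since co is an
    # integer shell index rather than a [0, 1]-normalized score.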
    pageranks = sorted(pageranks.items(), key=lambda x: x[1], reverse=True)
    print("Enter the desired k:")
    k1 = int(input())
    print("PageRank: the top %d most influential nodes are:" % k1)
    for i in range(k1):
        print("Node {}".format(pageranks[i][0]))
    pos = nx.random_layout(G)
    top_nodes = [k for k, v in pageranks[:k1]]
    other_nodes = [k for k, v in pageranks[k1:]]
    nx.draw_networkx_nodes(G, pos, top_nodes, node_size=100, node_color='Blue', alpha=0.6)
    nx.draw_networkx_nodes(G, pos, other_nodes, node_size=50, node_color='Yellow', alpha=0.6)
    nx.draw_networkx_edges(G, pos)
    labels = dict()
    for k, v in pageranks[:k1]:
        labels[k] = k
    nx.draw_networkx_labels(G, pos, labels=labels)
    plt.savefig("./pagerank_result.png")
    plt.show()
    print("---------------------------------------------")
    authorities = sorted(authorities.items(), key=lambda x: x[1], reverse=True)
    print("HITS: the top %d most influential nodes are:" % k1)
    for i in range(k1):
        print("Node {}".format(authorities[i][0]))
    pos = nx.random_layout(G)
    top_nodes = [k for k, v in authorities[:k1]]
    other_nodes = [k for k, v in authorities[k1:]]
    nx.draw_networkx_nodes(G, pos, top_nodes, node_size=100, node_color='Blue', alpha=0.6)
    nx.draw_networkx_nodes(G, pos, other_nodes, node_size=50, node_color='Yellow', alpha=0.6)
    nx.draw_networkx_edges(G, pos)
    labels = dict()
    for k, v in authorities[:k1]:
        labels[k] = k
    nx.draw_networkx_labels(G, pos, labels=labels)
    plt.savefig("./hits_result.png")
    plt.show()
    print("---------------------------------------------")
    fused = sorted(fused.items(), key=lambda x: x[1], reverse=True)
    print("Fused algorithm: the top %d most influential nodes are:" % k1)
    for i in range(k1):
        print("Node {}".format(fused[i][0]))
    pos = nx.random_layout(G)
    top_nodes = [k for k, v in fused[:k1]]
    other_nodes = [k for k, v in fused[k1:]]
    nx.draw_networkx_nodes(G, pos, top_nodes, node_size=100, node_color='Blue', alpha=0.6)
    nx.draw_networkx_nodes(G, pos, other_nodes, node_size=50, node_color='Yellow', alpha=0.6)
    nx.draw_networkx_edges(G, pos)
    labels = dict()
    for k, v in fused[:k1]:
        labels[k] = k
    nx.draw_networkx_labels(G, pos, labels=labels)
    plt.savefig("./fused_result.png")
    plt.show()
    print("---------------------------------------------")
    ks_modified = kshell_modified.get_top_nodes(G)
    print("Improved entropy-based k-shell: the top %d most influential nodes are:" % k1)
    for i in range(k1):
        print("Node {}".format(ks_modified[i]))
    pos = nx.random_layout(G)
    # Highlight the entropy-based k-shell ranking itself in this plot.
    top_nodes = ks_modified[:k1]
    top_set = set(top_nodes)
    other_nodes = [n for n in G.nodes if n not in top_set]
    nx.draw_networkx_nodes(G, pos, top_nodes, node_size=100, node_color='Blue', alpha=0.6)
    nx.draw_networkx_nodes(G, pos, other_nodes, node_size=50, node_color='Yellow', alpha=0.6)
    nx.draw_networkx_edges(G, pos)
    labels = dict()
    for n in top_nodes:
        labels[n] = n
    nx.draw_networkx_labels(G, pos, labels=labels)
    plt.savefig("./kshell_modified_result.png")
    plt.show()
    print("---------------------------------------------")
    # SIR spreading test seeded with the degree-centrality ranking.
    G = G.to_undirected()
    result = sir_model.SIR(G, 0.55, 10, 0.25, func_degree_centrality(G))
    print(" [S I R]")
    print(result)
    plt.plot(result[:, 0], 'g', label='Susceptibles')
    plt.plot(result[:, 1], 'r', label='Infectious')
    plt.plot(result[:, 2], 'b', label='Recovereds')
    plt.legend(loc='right')
    plt.xlabel('time')
    plt.ylabel('number of people')
    plt.savefig("./sir_result.png")
    plt.show()
    return 0
if __name__ == '__main__':
    path = './课程设计数据集.txt'
    # Course-project dataset (edge list).
    if not os.path.exists(path):
        print('Dataset not found')
        exit(1)
    G = make_graph(path)
    main(G)
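
# To run (assuming sir_model.py, kshell_modified.py, and the dataset
# file sit next to this script):
#
#     python main.py
#
# The script asks for k on stdin, prints the top-k nodes under each
# ranking, and writes one PNG per method to the current directory.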