jacinth2006/机器学习常见算法及演示 (Common Machine Learning Algorithms and Demos) · https://gitee.com/jacinth2006/ML.git

gat.py 7.13 KB
jacinth2006 committed on 2021-09-05 00:19 · Node classification with a graph attention network
#%%
import tensorflow as tf
import numpy as np
from utils import load_data
from sklearn.metrics import f1_score
from tensorflow.keras.metrics import Precision, Recall
class GatAggregator(tf.keras.layers.Layer):
    """One multi-head GAT layer: a per-head linear transform plus additive attention."""
    def __init__(self, head_n, output_dim, final=False, activation=tf.keras.activations.relu):
        super(GatAggregator, self).__init__()
        self.head_n = head_n
        self.output_dim = output_dim
        self.final = final
        self.activation = activation

    def build(self, input_shape):
        # Per head: a linear transform W and bias b, plus the two halves of the
        # attention vector (alpha1 scores the source node, alpha2 the target).
        self.weights_ = []
        self.bias_ = []
        self.alpha1_ = []
        self.alpha2_ = []
        for i in range(self.head_n):
            w = self.add_weight(
                name="GatAggregator Weight {}".format(i),
                shape=(input_shape[-1], self.output_dim),
                #initializer=tf.keras.initializers.GlorotUniform,
                dtype=tf.float32,
                trainable=True)
            self.weights_.append(w)
            b = self.add_weight(
                name="GatAggregator Bias {}".format(i),
                shape=(self.output_dim,),
                dtype=tf.float32,
                trainable=True)
            self.bias_.append(b)
            alpha = self.add_weight(
                name="GatAggregator Alpha1 {}".format(i),
                shape=(self.output_dim, 1),
                #initializer=tf.keras.initializers.GlorotUniform,
                dtype=tf.float32,
                trainable=True)
            self.alpha1_.append(alpha)
            alpha = self.add_weight(
                name="GatAggregator Alpha2 {}".format(i),
                shape=(self.output_dim, 1),
                #initializer=tf.keras.initializers.GlorotUniform,
                dtype=tf.float32,
                trainable=True)
            self.alpha2_.append(alpha)
    def call(self, x, adj):
        h_head = []
        for i in range(self.head_n):
            # Linearly transform the input X once per head.
            h = tf.matmul(x, self.weights_[i]) + self.bias_[i]
            # alpha1 and alpha2 are n*1 matrices that reduce h to one column each;
            # an n*1 matrix plus a 1*n matrix broadcasts to an n*n score matrix.
            # This is how the original paper computes the attention scores.
            e = tf.matmul(h, self.alpha1_[i]) + tf.transpose(tf.matmul(h, self.alpha2_[i]))
            # In practice the computation below also fits quite well; there e is
            # n*1 and broadcasting against adj on the next line yields an n*n
            # matrix. What is the difference? Computed that way, e is more like a
            # global importance score per node, and the two lines below are
            # equivalent up to reparameterization. With half the pairwise
            # attention parameters, it sits somewhere between GraphSAGE and GAT.
            #e = tf.matmul(h, self.alpha1_[i]) + tf.matmul(h, self.alpha2_[i])
            #e = tf.matmul(h, self.alpha1_[i])
            # Hadamard (element-wise) product with adj zeroes the scores of
            # non-neighbors, so after activation and normalization e acts as a
            # neighbor-importance matrix. Note that zeroed entries still receive
            # softmax weight exp(0), so non-neighbors keep a small share; the
            # original paper instead masks non-edges with -inf before softmax
            # (see the sketch after this class).
            e = tf.multiply(e, adj)
            e = tf.nn.leaky_relu(e)
            e = tf.nn.softmax(e, axis=1)
            h_head.append(tf.matmul(e, h))
        if self.final:
            # Final layer: average over the heads.
            output = self.activation(tf.reduce_mean(h_head, axis=0))
        else:
            # Hidden layers: concatenate the heads.
            output = self.activation(tf.concat(h_head, 1))
        return output
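# A minimal alternative sketch (not part of the original file): the GAT paper
# masks non-edges with a large negative value *before* softmax, so non-neighbors
# get essentially zero attention instead of the exp(0) share left over by the
# multiply-by-adj trick above. Usable as a drop-in replacement for the
# multiply/leaky_relu/softmax lines in call.
def masked_attention(e, adj):
    """e: (n, n) raw attention scores; adj: (n, n) 0/1 adjacency with self-loops."""
    e = tf.nn.leaky_relu(e)
    e = tf.where(adj > 0, e, -1e9 * tf.ones_like(e))  # mask out non-neighbors
    return tf.nn.softmax(e, axis=1)                   # each row sums to 1 over neighbors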
class GAT(tf.keras.models.Model):
    def __init__(self,
                 output_dims,
                 hidden_dims,
                 head_nums):
        super(GAT, self).__init__()
        self.l1 = GatAggregator(head_nums[0],
                                hidden_dims,
                                final=False)
        self.l2 = GatAggregator(head_nums[1],
                                output_dims,
                                final=True,
                                activation=lambda x: x)  # identity: output raw logits

    def call(self, x, adj):
        o = self.l1(x, adj)
        o = self.l2(o, adj)
        return o
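# Quick shape check (illustrative sketch, not in the original file): a random
# 5-node graph with 8-dim features through a 2-class GAT should yield (5, 2) logits.
_x = tf.random.normal((5, 8))
_a = tf.cast(tf.random.uniform((5, 5), 0, 2, dtype=tf.int32), tf.float32)
_a = tf.minimum(_a + tf.eye(5), 1.0)  # ensure self-loops
print(GAT(output_dims=2, hidden_dims=4, head_nums=[2, 1])(_x, _a).shape)  # (5, 2)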
#%%
data_path = r"D:\AI\tf\tf_offcial_tutorial\src\dataset\ind"
adj, feature, labels, train_mask, val_mask, test_mask = load_data(data_path, "cora")
feature = tf.convert_to_tensor(feature.toarray(), dtype=tf.float32)
adj = tf.convert_to_tensor(adj.toarray(), dtype=tf.float32)
labels = tf.convert_to_tensor(labels, dtype=tf.float32)
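# Assumption: the adjacency returned by load_data may lack self-loops; GAT
# attention should let each node attend to itself, so add them idempotently
# here (a no-op if the diagonal is already 1).
adj = tf.minimum(adj + tf.eye(adj.shape[0]), 1.0)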
#%%
class F1_Score(tf.keras.metrics.Metric):
    def __init__(self, name='f1_score', **kwargs):
        super().__init__(name=name, **kwargs)
        self.f1 = self.add_weight(name='f1', initializer='zeros')
        self.precision_fn = Precision(thresholds=0.5)
        self.recall_fn = Recall(thresholds=0.5)

    def update_state(self, y_true, y_pred, sample_weight=None):
        p = self.precision_fn(y_true, y_pred)
        r = self.recall_fn(y_true, y_pred)
        # since f1 is a variable, we use assign
        self.f1.assign(2 * ((p * r) / (p + r + 1e-6)))

    def result(self):
        return self.f1

    def reset_states(self):
        # we also need to reset the state of the precision and recall objects
        self.precision_fn.reset_states()
        self.recall_fn.reset_states()
        self.f1.assign(0)
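# Toy sanity check (illustrative, not in the original file): perfect one-hot
# predictions at the 0.5 threshold should give an F1 of ~1.0.
_f1 = F1_Score()
_f1.update_state(tf.constant([[0., 1.], [1., 0.]]), tf.constant([[0.1, 0.9], [0.8, 0.2]]))
print(float(_f1.result()))  # ~1.0
_f1.reset_states()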
head_nums = [4, 2]
hidden_dims = 10
output_dims = labels.shape[1]
model = GAT(output_dims, hidden_dims, head_nums)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
acc_train = tf.keras.metrics.CategoricalAccuracy()
acc_val = tf.keras.metrics.CategoricalAccuracy()
acc_test = tf.keras.metrics.CategoricalAccuracy()
f1_test = F1_Score()
@tf.function
def train_step(features, adj, labels, train_mask):
    with tf.GradientTape() as tape:
        output = model(features, adj)
        y_train = tf.boolean_mask(labels, train_mask)
        y_predict = tf.boolean_mask(output, train_mask)
        # Scalar mean cross-entropy over the masked training nodes.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(y_train, y_predict))
    gradient = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradient, model.trainable_variables))
    y_predict = tf.nn.softmax(y_predict, axis=1)
    acc_train.update_state(y_train, y_predict)
    # One forward pass with the updated weights covers both validation and test masks.
    output_val = model(features, adj)
    y_val = tf.boolean_mask(labels, val_mask)
    y_predict_val = tf.nn.softmax(tf.boolean_mask(output_val, val_mask), axis=1)
    acc_val.update_state(y_val, y_predict_val)
    y_test = tf.boolean_mask(labels, test_mask)
    y_predict_test = tf.nn.softmax(tf.boolean_mask(output_val, test_mask), axis=1)
    acc_test.update_state(y_test, y_predict_test)
    f1_test.update_state(y_test, y_predict_test)
epochs = 200
for i in range(epochs):
    acc_train.reset_states()
    acc_val.reset_states()
    acc_test.reset_states()
    f1_test.reset_states()
    train_step(feature, adj, labels, train_mask)
    print("epoch:{};acc_train:{};acc_val:{};acc_test:{};f1_test:{}".format(
        i, acc_train.result(), acc_val.result(),
        acc_test.result(), f1_test.result()))
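# Post-training evaluation sketch: this is where the f1_score imported from
# sklearn above comes in, reporting a macro-averaged F1 over the predicted
# classes of the test nodes.
logits = model(feature, adj)
y_true = tf.argmax(tf.boolean_mask(labels, test_mask), axis=1).numpy()
y_pred = tf.argmax(tf.boolean_mask(logits, test_mask), axis=1).numpy()
print("test macro-F1:", f1_score(y_true, y_pred, average="macro"))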