1 Star 0 Fork 0

TJU_AI/enose_baseline_paddle

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
data_loader.py 5.77 KB
一键复制 编辑 原始数据 按行查看 历史
DeXiu 提交于 2022-09-13 14:34 . Start kua_fire lstm
"""
@File : data_loader.py
@Author: tao.jing
@Date : 2022/5/18
@Desc :
"""
import os
import numpy as np
import h5py
from config import _C
# from pyts.approximation import PiecewiseAggregateApproximation
# from pyts.preprocessing import MinMaxScaler
# NOTE(review): everything inside the triple-quoted string below is
# commented-out code: the original pipeline that converted raw e-nose text
# recordings into the 'enose_fire.h5' file read by the loaders in this
# module (PAA downsampling + Gramian-angular-field image generation via
# pyts, channel 12 dropped). The indentation was lost, so it cannot be
# re-enabled as-is; kept verbatim for reference — consider deleting and
# relying on version history instead.
'''def data_to_h5(data_array_2D,data_num):
gram_num = []
for data_number in range(data_num):
gram_channel = []
for channel in range(16):
channel_pop = 0
if channel == 12:
channel_pop = 1
if channel_pop == 0:
Y_smooth1 = data_array_2D[data_number, 0:250, channel]
Y_smooth2 = data_array_2D[data_number, 250:1250, channel]
Y_smooth3 = data_array_2D[data_number, 1250:1500, channel]
# print(Y_smooth1.shape, Y_smooth1.size)
X_smooth1 = range(250)
X_smooth2 = range(1000)
X_smooth3 = range(250)
smooth1 = [X_smooth1, Y_smooth1]
smooth2 = [X_smooth2, Y_smooth2]
smooth3 = [X_smooth3, Y_smooth3]
# PAA
transformer_smooth1 = PiecewiseAggregateApproximation(window_size=50)
result1 = transformer_smooth1.transform(smooth1)
transformer_smooth2 = PiecewiseAggregateApproximation(window_size=20)
result2 = transformer_smooth2.transform(smooth2)
transformer_smooth3 = PiecewiseAggregateApproximation(window_size=50)
result3 = transformer_smooth3.transform(smooth3)
result = np.concatenate((result1, result2, result3), axis=1)
# Scaling in interval [0,1]
scaler = MinMaxScaler()
scaled_X = scaler.transform(result)
scaled_X1 = []
for data in scaled_X[1, :]:
if data >= 1:
scaled_X1.append(0.99999)
else:
scaled_X1.append(data)
arccos_X = np.arccos(scaled_X1)
field = [a + b for a in arccos_X for b in arccos_X]
gram = np.cos(field)
gram = gram.reshape(-1, 60)
gram_channel.append(gram)
# print(np.array(gram_channel).shape)
gram_num.append(gram_channel)
return np.array(gram_num)
def convert_data_to_h5():
train_data_name_path = _C.path.train_data_name_path
test_data_name_path = _C.path.test_data_name_path
train_data_files = list()
test_data_files = list()
with open(train_data_name_path, 'r') as f:
lines = f.readlines()
for line in lines:
name = line.rstrip()
data_file_path =\
np.loadtxt(os.path.join(_C.path.enose_train_data_dir, name))
train_data_files.append(data_file_path)
print(len(train_data_files))
with open(test_data_name_path, 'r') as f:
lines = f.readlines()
for line in lines:
name = line.rstrip()
data_file_path =\
np.loadtxt(os.path.join(_C.path.enose_test_data_dir, name))
test_data_files.append(data_file_path)
print(len(test_data_files))
train_label_array = np.loadtxt(os.path.join(_C.path.train_label_name_path))
test_label_array = np.loadtxt(os.path.join(_C.path.test_label_name_path))
train_data_files = [x[:1500] for x in train_data_files]
train_data_array = np.asarray(train_data_files)
train_data_array_2D = data_to_h5(train_data_array,_C.data.train_num)
# print(train_data_array_2D.shape)
test_data_files = [x[:1500] for x in test_data_files]
test_data_array = np.asarray(test_data_files)
test_data_array_2D = data_to_h5(test_data_array,_C.data.test_num)
# print(test_data_array_2D.shape)
train_data_array_1D = train_data_array[:, :, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15]]
test_data_array_1D = test_data_array[:, :, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15]]
with h5py.File('enose_fire.h5', 'w') as hf:
hf['enose_train_data_2D'] = train_data_array_2D
hf['enose_train_data_1D'] = train_data_array_1D
hf['enose_train_label'] = train_label_array
hf['enose_test_data_2D'] = test_data_array_2D
hf['enose_test_data_1D'] = test_data_array_1D
hf['enose_test_label'] = test_label_array'''
def load_enose_data():
    """Load the pre-converted e-nose dataset from the project HDF5 file.

    Reads six arrays from the file at ``_C.path.enose_data_h5_path``.
    The ``[:]`` slice materializes each ``h5py`` dataset as an in-memory
    numpy array, so the file handle can be closed before returning.

    Returns:
        tuple: ``(train_data_2D, train_data_1D, train_label,
        test_data_2D, test_data_1D, test_label)`` — numpy arrays whose
        shapes are determined by the conversion step that produced the
        HDF5 file (not visible here).

    Raises:
        OSError: if the HDF5 file cannot be opened.
        KeyError: if an expected dataset name is missing from the file.
    """
    # Previous version pre-initialized unrelated locals to None; they were
    # never read and have been removed.
    with h5py.File(_C.path.enose_data_h5_path, 'r') as hf:
        enose_train_data_2D = hf['enose_train_data_2D'][:]
        enose_train_data_1D = hf['enose_train_data_1D'][:]
        enose_train_label = hf['enose_train_label'][:]
        enose_test_data_2D = hf['enose_test_data_2D'][:]
        enose_test_data_1D = hf['enose_test_data_1D'][:]
        enose_test_label = hf['enose_test_label'][:]
    return (enose_train_data_2D, enose_train_data_1D, enose_train_label,
            enose_test_data_2D, enose_test_data_1D, enose_test_label)
def load_split_data(valid_ratio=0.2, shuffle=True):
    """Load the stored train and test sets from the project HDF5 file.

    WARNING: despite the name and signature, ``valid_ratio`` and
    ``shuffle`` are currently IGNORED — no validation split and no
    shuffling is performed; the stored train/test partitions are returned
    as-is. The parameters are kept for interface compatibility with
    existing callers.
    TODO(review): either implement the validation split (which would
    change the return contract) or drop the unused parameters.

    Args:
        valid_ratio (float): unused; intended fraction of a validation
            split.
        shuffle (bool): unused; intended shuffle flag.

    Returns:
        tuple: ``((train_2D, train_1D, train_label),
        (test_2D, test_1D, test_label))`` — two tuples of numpy arrays.

    Raises:
        OSError: if the HDF5 file cannot be opened.
        KeyError: if an expected dataset name is missing from the file.
    """
    # Previous version pre-initialized unrelated locals to None; they were
    # never read and have been removed.
    with h5py.File(_C.path.enose_data_h5_path, 'r') as hf:
        enose_train_data_2D = hf['enose_train_data_2D'][:]
        enose_train_data_1D = hf['enose_train_data_1D'][:]
        enose_train_label = hf['enose_train_label'][:]
        enose_test_data_2D = hf['enose_test_data_2D'][:]
        enose_test_data_1D = hf['enose_test_data_1D'][:]
        enose_test_label = hf['enose_test_label'][:]
    return ((enose_train_data_2D, enose_train_data_1D, enose_train_label),
            (enose_test_data_2D, enose_test_data_1D, enose_test_label))
if __name__ == '__main__':
    # Smoke test: verify the HDF5 file can be opened and read end-to-end.
    # Uncomment to regenerate the HDF5 file from raw data (requires the
    # conversion code currently disabled above):
    # convert_data_to_h5()
    load_enose_data()
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/tju_ai/enose-baseline-paddle.git
git@gitee.com:tju_ai/enose-baseline-paddle.git
tju_ai
enose-baseline-paddle
enose_baseline_paddle
master

搜索帮助

23e8dbc6 1850385 7e0993f3 1850385