1 Star 1 Fork 0

far/LSTM_GoogleClusterTraceData

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
2metric_LSTM_data5minutes.py 5.21 KB
一键复制 编辑 原始数据 按行查看 历史
thangbk2209 提交于 2017-11-21 22:18 . fix structure to check code
import numpy as np
import matplotlib
from time import time
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import math
import keras
from numpy import polyfit
from pandas import read_csv
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.callbacks import TensorBoard, EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
# Column names for the Google cluster trace CSV (the file has no header row).
colnames = ['cpu_rate', 'mem_usage', 'disk_io_time', 'disk_space']

# Load the 5-minute resource-usage trace for job 6336594489.
df = read_csv('data/Fuzzy_data_resource_JobId_6336594489_5minutes.csv',
              header=None, index_col=False, names=colnames, engine='python')
length = len(df['cpu_rate'].values)  # number of time steps in the trace
dataset = df.values                  # raw (length, 4) numeric matrix
print(length)
deseasonalArr = []  # per-column detrended (seasonally adjusted) series
# Per-column fitted trend curves; app[0] is the cpu_rate trend used later to
# reconstruct actual-scale CPU predictions. Hoisted out of the loop: the
# original re-created `app` on every column, so after the loop it held only
# the LAST column's curve and app[0] was the disk_space trend, not CPU.
app = []
for name in colnames:
    # Pseudo day-of-year index used as the regressor for the trend fit.
    X = [i % 365 for i in range(0, len(df[name].values))]
    y = df[name].values
    degree = 4
    coef = polyfit(X, y, degree)
    print('Coefficients: %s' % coef)
    # Evaluate the fitted degree-4 polynomial at every time step.
    # np.polyval(coef, x) == coef[0]*x**4 + ... + coef[4], identical to the
    # original manual Horner-less accumulation.
    curve = [np.polyval(coef, x) for x in X]
    app.append(curve)
    # Seasonally adjusted series = observed - fitted trend.
    values = df[name].values
    diff = [values[i] - curve[i] for i in range(len(values))]
    deseasonalArr.append(np.array(diff))
print("differencing")
print(app)
# Normalize the detrended CPU and RAM series to [0, 1].
length = len(dataset)
# NOTE(review): the original reused ONE scaler, refitting it on RAM and then
# CPU, so only the CPU fit survived for the later inverse_transform — correct
# by accident. Use a dedicated scaler per series; the name `scaler` stays
# bound to the CPU scaler so `scaler.inverse_transform` below is unchanged.
# fit_transform requires a 2-D (n_samples, n_features) array in modern
# sklearn, so reshape to a column and flatten back to the original 1-D shape.
ram_scaler = MinMaxScaler(feature_range=(0, 1))
RAM_nomal = ram_scaler.fit_transform(
    np.asarray(deseasonalArr[1]).reshape(-1, 1)).flatten()
scaler = MinMaxScaler(feature_range=(0, 1))
CPU_nomal = scaler.fit_transform(
    np.asarray(deseasonalArr[0]).reshape(-1, 1)).flatten()
# Grid-search over sliding-window lengths and batch sizes: for each combination
# train a 2-layer LSTM on the normalized (CPU, RAM) series to predict the next
# normalized CPU value, then de-normalize and re-add the CPU trend.
sliding_window = [1, 2, 3, 4, 5]
for sliding in sliding_window:
    print("sliding", sliding)
    # 67/33 train/test split on the time axis.
    train_size = int(length * 0.67)
    test_size = length - train_size
    batch_size_array = [8, 16, 32, 64, 128]
    # Build supervised samples: row i is
    # [cpu_i, ram_i, cpu_{i+1}, ram_{i+1}, ..., cpu_{i+sliding-1}, ram_{i+sliding-1}]
    data = []
    for i in range(length - sliding):
        a = []
        for j in range(sliding):
            a.append(CPU_nomal[i + j])
            a.append(RAM_nomal[i + j])
        data.append(a)
    data = np.array(data)
    print(data[0])
    trainX = data[0:train_size]
    print(trainX)
    # Target: the normalized CPU value one step after each window.
    trainY = CPU_nomal[sliding:train_size + sliding]
    testX = data[train_size:length - sliding]
    # Ground-truth CPU (column 0) for the test segment. The original indexed
    # column 1 (mem_usage), which mismatched the CPU predictions it was meant
    # to be scored against.
    testY = dataset.T[0][train_size + sliding:length]
    print("testx,testy")
    print(testX[0], testY[0])
    print(testX[1], testY[1])
    # Reshape input to [samples, time steps, features] as keras LSTM expects.
    trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
    testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
    print(trainX)
    for batch_size in batch_size_array:
        print("batch_size= ", batch_size)
        # 2-layer LSTM (32 -> 4 units) with a single linear output.
        model = Sequential()
        model.add(LSTM(32, return_sequences=True, input_shape=(2 * sliding, 1)))
        model.add(LSTM(4))
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer='adam',
                      metrics=['mean_squared_error'])
        # Up to 2000 epochs, but early-stopped once training loss stalls
        # for 20 epochs.
        history = model.fit(trainX, trainY, epochs=2000, batch_size=batch_size,
                            verbose=2, validation_split=0.1,
                            callbacks=[EarlyStopping(monitor='loss',
                                                     patience=20, verbose=1)])
        print(history.history.keys())
        # Plot the loss curves on a FRESH figure: the original never cleared
        # pyplot state, so every saved PNG accumulated the curves of all
        # previous batch sizes.
        plt.figure()
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.savefig('resultsdataDeseasonal5minutes/2layer_32-4neu/history_sliding=%s_batchsize=%s.png' % (sliding, batch_size))
        plt.close()
        # Predict the test segment and invert the MinMax scaling back to the
        # detrended scale.
        testPredict = model.predict(testX)
        print(testPredict)
        testPredictInverse = scaler.inverse_transform(testPredict)
        print(testPredictInverse)
        # Re-add the fitted CPU trend (app[0]) to get actual-scale predictions.
        resultsCPUPredicts = []
        for i in range(len(testPredict)):
            resultsCPUPredicts.append(
                np.array(testPredictInverse[i] + app[0][train_size + sliding + i]))
        print('resultsCPUPredicts')
        print(len(testPredict))
        print(length - train_size - sliding)
        print(resultsCPUPredicts)
        # Score against the true CPU column. The original had this commented
        # out with a broken `[:,0]` slice on a plain list; flatten to 1-D first.
        results_arr = np.array(resultsCPUPredicts).reshape(-1)
        testScoreRMSE = math.sqrt(mean_squared_error(testY, results_arr))
        testScoreMAE = mean_absolute_error(testY, results_arr)
        print('Test Score: %.2f RMSE' % (testScoreRMSE))
        print('Test Score: %.2f MAE' % (testScoreMAE))
        # Persist normalized predictions, actual-scale predictions, and scores.
        testNotInverseDf = pd.DataFrame(np.array(testPredict))
        testNotInverseDf.to_csv('resultsdataDeseasonal5minutes/2layer_32-4neu/testPredict_sliding=%s_batchsize=%s.csv' % (sliding, batch_size), index=False, header=None)
        testDf = pd.DataFrame(np.array(resultsCPUPredicts))
        testDf.to_csv('resultsdataDeseasonal5minutes/2layer_32-4neu/testPredictInverse_sliding=%s_batchsize=%s.csv' % (sliding, batch_size), index=False, header=None)
        errorDf = pd.DataFrame(np.array([testScoreRMSE, testScoreMAE]))
        errorDf.to_csv('resultsdataDeseasonal5minutes/2layer_32-4neu/error_sliding=%s_batchsize=%s.csv' % (sliding, batch_size), index=False, header=None)
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/littlefar/LSTM_GoogleClusterTraceData.git
git@gitee.com:littlefar/LSTM_GoogleClusterTraceData.git
littlefar
LSTM_GoogleClusterTraceData
LSTM_GoogleClusterTraceData
master

搜索帮助

0d507c66 1850385 C8b1a773 1850385