from flask import Flask, render_template, request, redirect
# from livereload import Server
from pyspark.ml.recommendation import ALS
from pyspark.ml.recommendation import ALSModel
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import lit
import json
import time
def makeRecommendations(model, userID, number):
    # Pair every known artist (from the model's item factors) with this user,
    # then score each pair with the ALS model and keep the `number` best artists.
    toRecommend = model.itemFactors.selectExpr("id as artist").withColumn("user", lit(userID))
    toRecommend = toRecommend.withColumn("artist", toRecommend['artist'].cast("int")) \
                             .withColumn("user", toRecommend['user'].cast("int"))
    toRecommend.printSchema()
    topArtists = model.transform(toRecommend).select("artist", "prediction") \
                      .orderBy('prediction', ascending=False).take(number)
    return topArtists
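# Alternative sketch (not used above): Spark 2.3+ exposes ALSModel.recommendForUserSubset,
# which scores every item for the given users in a single call. This assumes the model was
# trained with userCol="user" and itemCol="artist", as the transform() call above implies,
# and that the global `spark` session has already been created.
def makeRecommendationsBuiltin(model, userID, number):
    users = spark.createDataFrame([(int(userID),)], ["user"])
    users = users.withColumn("user", users["user"].cast("int"))
    recs = model.recommendForUserSubset(users, number)
    # one row per user; `recommendations` is an array of (artist, rating) structs
    return recs.collect()[0].recommendations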
def artistPredict(userID):
    # Look up the top-10 recommendations and attach each artist's name.
    recommend = makeRecommendations(modelnew, userID, 10)
    results = []
    for row in recommend:
        name = artistByID.filter(artistByID['artist'] == str(row.artist)).select('name').collect()[0]
        rec = row.asDict()
        rec.update({'name': name.name})
        results.append(rec)
    # Persist the recommendations so the front end can read them as JSON.
    with open('/usr/local/spark/test/code/static/data/predict.json', 'w') as f:
        f.write(json.dumps(results))
    return json.dumps(results)
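# The file written above is a JSON array of objects shaped roughly like
#   [{"artist": <artist id>, "prediction": <ALS score>, "name": <artist name>}, ...]
# Since it lands under the app's static directory, the front end should also be able to
# fetch it directly at /static/data/predict.json (assuming Flask's default static_folder layout).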
app = Flask(__name__)
@app.route('/')
def index():
    # use render_template() to render the template
    return render_template('index.html')
@app.route('/direct', methods=['GET', 'POST'])
def predict():
    if request.method == 'POST':
        userID = request.form['userID']
        print(userID)
        mess = artistPredict(userID)
        print(mess)
        time.sleep(3)
        return render_template('genre-predict.html', message=mess, userid=userID)
    # plain GET requests fall back to the home page
    return redirect('/')
@app.route('/<filename>')
def req_file(filename):
    return render_template(filename)
if __name__ == '__main__':
    app.debug = True                    # code changes take effect immediately while debugging
    app.jinja_env.auto_reload = True    # template changes take effect immediately
    sc = SparkContext('local', 'test')
    sc.setLogLevel("WARN")
    spark = SparkSession.builder.getOrCreate()
    # Load the trained ALS model and the lookup/training tables produced offline.
    modelnew = ALSModel.load("/usr/local/spark/Model/modelnew")
    artistByID = spark.read.csv("/usr/local/spark/Model/artistByID").toDF("artist", "name")
    trainData = spark.read.csv("/usr/local/spark/Model/trainData").toDF("user", "artist", "count")
    trainData.cache()
    artistByID.cache()
    trainData = trainData.withColumn('count', trainData['count'].cast('int'))
    trainData.printSchema()
    app.run()                           # run() starts the app on the local development server
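# Example of exercising the /direct endpoint from another process, as a sketch:
# assumes Flask's default 127.0.0.1:5000 and an illustrative user id taken from
# the training data (the id below is hypothetical).
#   import requests
#   requests.post("http://127.0.0.1:5000/direct", data={"userID": "2093760"})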